From e7ee850d46d54789979bf0c5244bae1825fb7149 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 14 Apr 2024 22:19:53 +0200 Subject: Adding upstream version 0.91.0. Signed-off-by: Daniel Baumann --- .github/CODEOWNERS | 2 + .github/release-drafter.yml | 2 + .github/workflows/ack.yml | 9 + .github/workflows/push.yml | 12 + .github/workflows/release.yml | 48 + .github/workflows/tox.yml | 76 + .gitignore | 9 + .hgignore | 16 + .pre-commit-config.yaml | 48 + .readthedocs.yaml | 13 + CHANGES | 1085 +++++++++ LICENSE | 21 + MANIFEST.in | 2 + README.rst | 257 ++ _doc/Makefile | 216 ++ _doc/_static/license.svg | 1 + _doc/_static/pypi.svg | 1 + _doc/api.rst | 287 +++ _doc/basicuse.rst | 55 + _doc/conf.py | 298 +++ _doc/contributing.rst | 80 + _doc/contributing.ryd | 133 ++ _doc/detail.rst | 289 +++ _doc/dumpcls.rst | 101 + _doc/dumpcls.ryd | 107 + _doc/example.rst | 332 +++ _doc/index.rst | 27 + _doc/index.ryd | 56 + _doc/install.rst | 53 + _doc/overview.rst | 48 + _doc/pyyaml.rst | 80 + _doc/upmerge.rst | 97 + _test/__init__.py | 0 _test/data/a-nasty-libyaml-bug.loader-error | 1 + _test/data/aliases-cdumper-bug.code | 1 + _test/data/aliases.events | 8 + _test/data/bool.data | 18 + _test/data/bool.detect | 1 + _test/data/colon-in-flow-context.loader-error | 1 + _test/data/comment_no_eol.data | 1 + _test/data/composite_key.code | 1 + _test/data/composite_key.data | 4 + _test/data/construct-binary-py3.code | 7 + _test/data/construct-binary-py3.data | 12 + _test/data/construct-bool.code | 7 + _test/data/construct-bool.data | 9 + _test/data/construct-custom.code | 10 + _test/data/construct-custom.data | 26 + _test/data/construct-float.code | 8 + _test/data/construct-float.data | 6 + _test/data/construct-int.code | 8 + _test/data/construct-int.data | 6 + _test/data/construct-map.code | 6 + _test/data/construct-map.data | 6 + _test/data/construct-merge.code | 10 + _test/data/construct-merge.data | 27 + _test/data/construct-null.code | 13 + _test/data/construct-null.data | 
18 + _test/data/construct-omap.code | 8 + _test/data/construct-omap.data | 8 + _test/data/construct-pairs.code | 9 + _test/data/construct-pairs.data | 7 + _test/data/construct-python-bool.code | 1 + _test/data/construct-python-bool.data | 1 + _test/data/construct-python-bytes-py3.code | 1 + _test/data/construct-python-bytes-py3.data | 1 + _test/data/construct-python-complex.code | 1 + _test/data/construct-python-complex.data | 8 + _test/data/construct-python-float.code | 1 + _test/data/construct-python-float.data | 1 + _test/data/construct-python-int.code | 1 + _test/data/construct-python-int.data | 1 + _test/data/construct-python-long-short-py3.code | 1 + _test/data/construct-python-long-short-py3.data | 1 + _test/data/construct-python-name-module.code | 1 + _test/data/construct-python-name-module.data | 5 + _test/data/construct-python-none.code | 1 + _test/data/construct-python-none.data | 1 + _test/data/construct-python-object.code | 23 + _test/data/construct-python-object.data | 21 + _test/data/construct-python-str-ascii.code | 1 + _test/data/construct-python-str-ascii.data | 1 + _test/data/construct-python-str-utf8-py2.code | 1 + _test/data/construct-python-str-utf8-py3.code | 1 + _test/data/construct-python-str-utf8-py3.data | 1 + _test/data/construct-python-tuple-list-dict.code | 6 + _test/data/construct-python-tuple-list-dict.data | 8 + _test/data/construct-python-unicode-ascii-py3.code | 1 + _test/data/construct-python-unicode-ascii-py3.data | 1 + _test/data/construct-python-unicode-utf8-py2.code | 1 + _test/data/construct-python-unicode-utf8-py3.code | 1 + _test/data/construct-python-unicode-utf8-py3.data | 1 + _test/data/construct-seq.code | 4 + _test/data/construct-seq.data | 15 + _test/data/construct-set.code | 4 + _test/data/construct-set.data | 7 + _test/data/construct-str-ascii.code | 1 + _test/data/construct-str-ascii.data | 1 + _test/data/construct-str-utf8-py2.code | 1 + _test/data/construct-str-utf8-py3.code | 1 + 
_test/data/construct-str-utf8-py3.data | 1 + _test/data/construct-str.code | 1 + _test/data/construct-str.data | 1 + _test/data/construct-timestamp.code | 7 + _test/data/construct-timestamp.data | 5 + _test/data/construct-value.code | 9 + _test/data/construct-value.data | 10 + ...ocument-separator-in-quoted-scalar.loader-error | 11 + _test/data/documents.events | 11 + _test/data/duplicate-anchor-1.loader-warning | 3 + _test/data/duplicate-anchor-2.loader-warning | 1 + .../duplicate-merge-key.former-loader-error.code | 1 + _test/data/duplicate-tag-directive.loader-error | 3 + _test/data/duplicate-yaml-directive.loader-error | 3 + ...lock-scalar-in-simple-key-context-bug.canonical | 6 + ...mit-block-scalar-in-simple-key-context-bug.data | 4 + ...ing-unacceptable-unicode-character-bug-py3.code | 1 + ...ing-unacceptable-unicode-character-bug-py3.data | 1 + ...unacceptable-unicode-character-bug-py3.skip-ext | 0 _test/data/empty-anchor.emitter-error | 5 + _test/data/empty-document-bug.canonical | 1 + _test/data/empty-document-bug.data | 0 _test/data/empty-document-bug.empty | 0 _test/data/empty-documents.single-loader-error | 2 + _test/data/empty-python-module.loader-error | 1 + _test/data/empty-python-name.loader-error | 1 + _test/data/empty-tag-handle.emitter-error | 5 + _test/data/empty-tag-prefix.emitter-error | 5 + _test/data/empty-tag.emitter-error | 5 + _test/data/expected-document-end.emitter-error | 6 + _test/data/expected-document-start.emitter-error | 4 + _test/data/expected-mapping.loader-error | 1 + _test/data/expected-node-1.emitter-error | 4 + _test/data/expected-node-2.emitter-error | 7 + _test/data/expected-nothing.emitter-error | 4 + _test/data/expected-scalar.loader-error | 1 + _test/data/expected-sequence.loader-error | 1 + _test/data/expected-stream-start.emitter-error | 2 + _test/data/explicit-document.single-loader-error | 4 + _test/data/fetch-complex-value-bug.loader-error | 2 + _test/data/float-representer-2.3-bug.code | 7 + 
_test/data/float-representer-2.3-bug.data | 5 + _test/data/float.data | 6 + _test/data/float.detect | 1 + _test/data/forbidden-entry.loader-error | 2 + _test/data/forbidden-key.loader-error | 2 + _test/data/forbidden-value.loader-error | 1 + _test/data/implicit-document.single-loader-error | 3 + _test/data/int.data | 7 + _test/data/int.detect | 1 + _test/data/invalid-anchor-1.loader-error | 1 + _test/data/invalid-anchor-2.loader-error | 8 + _test/data/invalid-anchor.emitter-error | 5 + _test/data/invalid-base64-data-2.loader-error | 2 + _test/data/invalid-base64-data.loader-error | 2 + .../invalid-block-scalar-indicator.loader-error | 2 + _test/data/invalid-character.loader-error | Bin 0 -> 2209 bytes _test/data/invalid-character.stream-error | Bin 0 -> 4193 bytes _test/data/invalid-directive-line.loader-error | 2 + _test/data/invalid-directive-name-1.loader-error | 2 + _test/data/invalid-directive-name-2.loader-error | 2 + _test/data/invalid-escape-character.loader-error | 1 + _test/data/invalid-escape-numbers.loader-error | 1 + .../invalid-indentation-indicator-1.loader-error | 2 + .../invalid-indentation-indicator-2.loader-error | 2 + ...nvalid-item-without-trailing-break.loader-error | 2 + _test/data/invalid-merge-1.loader-error | 2 + _test/data/invalid-merge-2.loader-error | 2 + _test/data/invalid-omap-1.loader-error | 3 + _test/data/invalid-omap-2.loader-error | 3 + _test/data/invalid-omap-3.loader-error | 4 + _test/data/invalid-pairs-1.loader-error | 3 + _test/data/invalid-pairs-2.loader-error | 3 + _test/data/invalid-pairs-3.loader-error | 4 + _test/data/invalid-python-bytes-2-py3.loader-error | 2 + _test/data/invalid-python-bytes-py3.loader-error | 2 + _test/data/invalid-python-module-kind.loader-error | 1 + .../data/invalid-python-module-value.loader-error | 1 + _test/data/invalid-python-module.loader-error | 1 + _test/data/invalid-python-name-kind.loader-error | 1 + .../data/invalid-python-name-module-2.loader-error | 1 + 
_test/data/invalid-python-name-module.loader-error | 1 + _test/data/invalid-python-name-object.loader-error | 1 + _test/data/invalid-python-name-value.loader-error | 1 + _test/data/invalid-simple-key.loader-error | 3 + _test/data/invalid-single-quote-bug.code | 1 + _test/data/invalid-single-quote-bug.data | 2 + _test/data/invalid-starting-character.loader-error | 1 + _test/data/invalid-tag-1.loader-error | 1 + _test/data/invalid-tag-2.loader-error | 1 + .../data/invalid-tag-directive-handle.loader-error | 2 + .../data/invalid-tag-directive-prefix.loader-error | 2 + _test/data/invalid-tag-handle-1.emitter-error | 5 + _test/data/invalid-tag-handle-1.loader-error | 2 + _test/data/invalid-tag-handle-2.emitter-error | 5 + _test/data/invalid-tag-handle-2.loader-error | 2 + _test/data/invalid-uri-escapes-1.loader-error | 1 + _test/data/invalid-uri-escapes-2.loader-error | 1 + _test/data/invalid-uri-escapes-3.loader-error | 1 + _test/data/invalid-uri.loader-error | 1 + _test/data/invalid-utf8-byte.loader-error | 66 + _test/data/invalid-utf8-byte.stream-error | 66 + .../invalid-yaml-directive-version-1.loader-error | 3 + .../invalid-yaml-directive-version-2.loader-error | 2 + .../invalid-yaml-directive-version-3.loader-error | 2 + .../invalid-yaml-directive-version-4.loader-error | 2 + .../invalid-yaml-directive-version-5.loader-error | 2 + .../invalid-yaml-directive-version-6.loader-error | 2 + _test/data/invalid-yaml-version.loader-error | 2 + _test/data/latin.unicode | 384 +++ _test/data/mappings.events | 44 + _test/data/merge.data | 1 + _test/data/merge.detect | 1 + _test/data/more-floats.code | 1 + _test/data/more-floats.data | 1 + _test/data/negative-float-bug.code | 1 + _test/data/negative-float-bug.data | 1 + _test/data/no-alias-anchor.emitter-error | 8 + _test/data/no-alias-anchor.skip-ext | 0 _test/data/no-block-collection-end.loader-error | 3 + _test/data/no-block-mapping-end-2.loader-error | 3 + _test/data/no-block-mapping-end.loader-error | 1 + 
_test/data/no-document-start.loader-error | 3 + _test/data/no-flow-mapping-end.loader-error | 1 + _test/data/no-flow-sequence-end.loader-error | 1 + _test/data/no-node-1.loader-error | 1 + _test/data/no-node-2.loader-error | 1 + _test/data/no-tag.emitter-error | 5 + _test/data/null.data | 3 + _test/data/null.detect | 1 + _test/data/odd-utf16.stream-error | Bin 0 -> 1311 bytes _test/data/omap.data | 8 + _test/data/omap.roundtrip | 0 _test/data/recursive-anchor.former-loader-error | 4 + _test/data/recursive-dict.recursive | 3 + _test/data/recursive-list.recursive | 2 + _test/data/recursive-set.recursive | 7 + _test/data/recursive-state.recursive | 2 + _test/data/recursive-tuple.recursive | 3 + _test/data/recursive.former-dumper-error | 3 + .../remove-possible-simple-key-bug.loader-error | 3 + _test/data/resolver.data | 30 + _test/data/resolver.path | 30 + _test/data/run-parser-crash-bug.data | 8 + _test/data/scalars.events | 28 + _test/data/scan-document-end-bug.canonical | 3 + _test/data/scan-document-end-bug.data | 3 + _test/data/scan-line-break-bug.canonical | 3 + _test/data/scan-line-break-bug.data | 3 + _test/data/sequences.events | 81 + .../data/serializer-is-already-opened.dumper-error | 3 + _test/data/serializer-is-closed-1.dumper-error | 4 + _test/data/serializer-is-closed-2.dumper-error | 4 + _test/data/serializer-is-not-opened-1.dumper-error | 2 + _test/data/serializer-is-not-opened-2.dumper-error | 2 + _test/data/single-dot-is-not-float-bug.code | 1 + _test/data/single-dot-is-not-float-bug.data | 1 + _test/data/sloppy-indentation.canonical | 18 + _test/data/sloppy-indentation.data | 17 + _test/data/spec-02-01.code | 1 + _test/data/spec-02-01.data | 3 + _test/data/spec-02-01.structure | 1 + _test/data/spec-02-01.tokens | 1 + _test/data/spec-02-02.data | 3 + _test/data/spec-02-02.structure | 1 + _test/data/spec-02-02.tokens | 5 + _test/data/spec-02-03.data | 8 + _test/data/spec-02-03.structure | 1 + _test/data/spec-02-03.tokens | 4 + 
_test/data/spec-02-04.data | 8 + _test/data/spec-02-04.structure | 4 + _test/data/spec-02-04.tokens | 4 + _test/data/spec-02-05.data | 3 + _test/data/spec-02-05.structure | 5 + _test/data/spec-02-05.tokens | 5 + _test/data/spec-02-06.data | 5 + _test/data/spec-02-06.structure | 4 + _test/data/spec-02-06.tokens | 4 + _test/data/spec-02-07.data | 10 + _test/data/spec-02-07.structure | 4 + _test/data/spec-02-07.tokens | 12 + _test/data/spec-02-08.data | 10 + _test/data/spec-02-08.structure | 4 + _test/data/spec-02-08.tokens | 15 + _test/data/spec-02-09.data | 8 + _test/data/spec-02-09.structure | 1 + _test/data/spec-02-09.tokens | 5 + _test/data/spec-02-10.data | 8 + _test/data/spec-02-10.structure | 1 + _test/data/spec-02-10.tokens | 5 + _test/data/spec-02-11.code | 10 + _test/data/spec-02-11.data | 9 + _test/data/spec-02-11.structure | 4 + _test/data/spec-02-11.tokens | 6 + _test/data/spec-02-12.data | 8 + _test/data/spec-02-12.structure | 5 + _test/data/spec-02-12.tokens | 6 + _test/data/spec-02-13.data | 4 + _test/data/spec-02-13.structure | 1 + _test/data/spec-02-13.tokens | 1 + _test/data/spec-02-14.data | 4 + _test/data/spec-02-14.structure | 1 + _test/data/spec-02-14.tokens | 1 + _test/data/spec-02-15.data | 8 + _test/data/spec-02-15.structure | 1 + _test/data/spec-02-15.tokens | 1 + _test/data/spec-02-16.data | 7 + _test/data/spec-02-16.structure | 1 + _test/data/spec-02-16.tokens | 5 + _test/data/spec-02-17.data | 7 + _test/data/spec-02-17.structure | 1 + _test/data/spec-02-17.tokens | 8 + _test/data/spec-02-18.data | 6 + _test/data/spec-02-18.structure | 1 + _test/data/spec-02-18.tokens | 4 + _test/data/spec-02-19.data | 5 + _test/data/spec-02-19.structure | 1 + _test/data/spec-02-19.tokens | 7 + _test/data/spec-02-20.data | 6 + _test/data/spec-02-20.structure | 1 + _test/data/spec-02-20.tokens | 8 + _test/data/spec-02-21.data | 4 + _test/data/spec-02-21.structure | 1 + _test/data/spec-02-21.tokens | 6 + _test/data/spec-02-22.data | 4 + 
_test/data/spec-02-22.structure | 1 + _test/data/spec-02-22.tokens | 6 + _test/data/spec-02-23.data | 13 + _test/data/spec-02-23.structure | 1 + _test/data/spec-02-23.tokens | 6 + _test/data/spec-02-24.data | 14 + _test/data/spec-02-24.structure | 5 + _test/data/spec-02-24.tokens | 20 + _test/data/spec-02-25.data | 7 + _test/data/spec-02-25.structure | 1 + _test/data/spec-02-25.tokens | 6 + _test/data/spec-02-26.data | 7 + _test/data/spec-02-26.structure | 5 + _test/data/spec-02-26.tokens | 6 + _test/data/spec-02-27.data | 29 + _test/data/spec-02-27.structure | 17 + _test/data/spec-02-27.tokens | 20 + _test/data/spec-02-28.data | 26 + _test/data/spec-02-28.structure | 10 + _test/data/spec-02-28.tokens | 23 + _test/data/spec-05-01-utf16be.data | Bin 0 -> 34 bytes _test/data/spec-05-01-utf16be.empty | 2 + _test/data/spec-05-01-utf16le.data | Bin 0 -> 34 bytes _test/data/spec-05-01-utf16le.empty | 2 + _test/data/spec-05-01-utf8.data | 1 + _test/data/spec-05-01-utf8.empty | 2 + _test/data/spec-05-02-utf16be.data | Bin 0 -> 90 bytes _test/data/spec-05-02-utf16be.error | 3 + _test/data/spec-05-02-utf16le.data | Bin 0 -> 90 bytes _test/data/spec-05-02-utf16le.error | 3 + _test/data/spec-05-02-utf8.data | 3 + _test/data/spec-05-02-utf8.error | 3 + _test/data/spec-05-03.canonical | 14 + _test/data/spec-05-03.data | 7 + _test/data/spec-05-04.canonical | 13 + _test/data/spec-05-04.data | 2 + _test/data/spec-05-05.data | 1 + _test/data/spec-05-05.empty | 2 + _test/data/spec-05-06.canonical | 8 + _test/data/spec-05-06.data | 2 + _test/data/spec-05-07.canonical | 8 + _test/data/spec-05-07.data | 4 + _test/data/spec-05-08.canonical | 8 + _test/data/spec-05-08.data | 2 + _test/data/spec-05-09.canonical | 3 + _test/data/spec-05-09.data | 2 + _test/data/spec-05-10.data | 2 + _test/data/spec-05-10.error | 3 + _test/data/spec-05-11.canonical | 6 + _test/data/spec-05-11.data | 3 + _test/data/spec-05-12.data | 9 + _test/data/spec-05-12.error | 8 + _test/data/spec-05-13.canonical | 5 + 
_test/data/spec-05-13.data | 3 + _test/data/spec-05-14.canonical | 7 + _test/data/spec-05-14.data | 2 + _test/data/spec-05-15.data | 3 + _test/data/spec-05-15.error | 3 + _test/data/spec-06-01.canonical | 15 + _test/data/spec-06-01.data | 14 + _test/data/spec-06-02.data | 3 + _test/data/spec-06-02.empty | 2 + _test/data/spec-06-03.canonical | 6 + _test/data/spec-06-03.data | 2 + _test/data/spec-06-04.canonical | 6 + _test/data/spec-06-04.data | 4 + _test/data/spec-06-05.canonical | 16 + _test/data/spec-06-05.data | 6 + _test/data/spec-06-06.canonical | 10 + _test/data/spec-06-06.data | 7 + _test/data/spec-06-07.canonical | 6 + _test/data/spec-06-07.data | 8 + _test/data/spec-06-08.canonical | 5 + _test/data/spec-06-08.data | 2 + _test/data/spec-07-01.canonical | 3 + _test/data/spec-07-01.data | 3 + _test/data/spec-07-01.skip-ext | 0 _test/data/spec-07-02.canonical | 3 + _test/data/spec-07-02.data | 4 + _test/data/spec-07-02.skip-ext | 0 _test/data/spec-07-03.data | 3 + _test/data/spec-07-03.error | 3 + _test/data/spec-07-04.canonical | 3 + _test/data/spec-07-04.data | 3 + _test/data/spec-07-05.data | 3 + _test/data/spec-07-05.error | 4 + _test/data/spec-07-06.canonical | 6 + _test/data/spec-07-06.data | 5 + _test/data/spec-07-07a.canonical | 3 + _test/data/spec-07-07a.data | 2 + _test/data/spec-07-07b.canonical | 3 + _test/data/spec-07-07b.data | 4 + _test/data/spec-07-08.canonical | 7 + _test/data/spec-07-08.data | 9 + _test/data/spec-07-09.canonical | 9 + _test/data/spec-07-09.data | 11 + _test/data/spec-07-10.canonical | 15 + _test/data/spec-07-10.data | 11 + _test/data/spec-07-11.data | 2 + _test/data/spec-07-11.empty | 2 + _test/data/spec-07-12a.canonical | 6 + _test/data/spec-07-12a.data | 3 + _test/data/spec-07-12b.canonical | 3 + _test/data/spec-07-12b.data | 4 + _test/data/spec-07-13.canonical | 9 + _test/data/spec-07-13.data | 9 + _test/data/spec-08-01.canonical | 8 + _test/data/spec-08-01.data | 2 + _test/data/spec-08-02.canonical | 8 + 
_test/data/spec-08-02.data | 2 + _test/data/spec-08-03.canonical | 6 + _test/data/spec-08-03.data | 2 + _test/data/spec-08-04.data | 2 + _test/data/spec-08-04.error | 6 + _test/data/spec-08-05.canonical | 7 + _test/data/spec-08-05.data | 5 + _test/data/spec-08-06.data | 5 + _test/data/spec-08-06.error | 4 + _test/data/spec-08-07.canonical | 8 + _test/data/spec-08-07.data | 4 + _test/data/spec-08-08.canonical | 15 + _test/data/spec-08-08.data | 13 + _test/data/spec-08-09.canonical | 21 + _test/data/spec-08-09.data | 11 + _test/data/spec-08-10.canonical | 23 + _test/data/spec-08-10.data | 15 + _test/data/spec-08-11.canonical | 8 + _test/data/spec-08-11.data | 2 + _test/data/spec-08-12.canonical | 10 + _test/data/spec-08-12.data | 8 + _test/data/spec-08-13.canonical | 10 + _test/data/spec-08-13.data | 4 + _test/data/spec-08-13.skip-ext | 0 _test/data/spec-08-14.canonical | 10 + _test/data/spec-08-14.data | 5 + _test/data/spec-08-15.canonical | 11 + _test/data/spec-08-15.data | 5 + _test/data/spec-09-01.canonical | 11 + _test/data/spec-09-01.data | 6 + _test/data/spec-09-02.canonical | 7 + _test/data/spec-09-02.data | 6 + _test/data/spec-09-03.canonical | 7 + _test/data/spec-09-03.data | 6 + _test/data/spec-09-04.canonical | 6 + _test/data/spec-09-04.data | 4 + _test/data/spec-09-05.canonical | 7 + _test/data/spec-09-05.data | 8 + _test/data/spec-09-06.canonical | 3 + _test/data/spec-09-06.data | 1 + _test/data/spec-09-07.canonical | 11 + _test/data/spec-09-07.data | 6 + _test/data/spec-09-08.canonical | 6 + _test/data/spec-09-08.data | 1 + _test/data/spec-09-09.canonical | 7 + _test/data/spec-09-09.data | 6 + _test/data/spec-09-10.canonical | 5 + _test/data/spec-09-10.data | 3 + _test/data/spec-09-11.canonical | 6 + _test/data/spec-09-11.data | 5 + _test/data/spec-09-12.canonical | 12 + _test/data/spec-09-12.data | 8 + _test/data/spec-09-13.canonical | 11 + _test/data/spec-09-13.data | 6 + _test/data/spec-09-14.data | 14 + _test/data/spec-09-14.error | 6 + 
_test/data/spec-09-15.canonical | 18 + _test/data/spec-09-15.data | 13 + _test/data/spec-09-16.canonical | 6 + _test/data/spec-09-16.data | 3 + _test/data/spec-09-17.canonical | 4 + _test/data/spec-09-17.data | 3 + _test/data/spec-09-18.canonical | 8 + _test/data/spec-09-18.data | 9 + _test/data/spec-09-19.canonical | 6 + _test/data/spec-09-19.data | 4 + _test/data/spec-09-20.canonical | 8 + _test/data/spec-09-20.data | 11 + _test/data/spec-09-20.skip-ext | 0 _test/data/spec-09-21.data | 8 + _test/data/spec-09-21.error | 7 + _test/data/spec-09-22.canonical | 10 + _test/data/spec-09-22.data | 4 + _test/data/spec-09-23.canonical | 10 + _test/data/spec-09-23.data | 11 + _test/data/spec-09-24.canonical | 10 + _test/data/spec-09-24.data | 6 + _test/data/spec-09-25.canonical | 4 + _test/data/spec-09-25.data | 3 + _test/data/spec-09-26.canonical | 3 + _test/data/spec-09-26.data | 8 + _test/data/spec-09-27.canonical | 3 + _test/data/spec-09-27.data | 8 + _test/data/spec-09-28.canonical | 3 + _test/data/spec-09-28.data | 8 + _test/data/spec-09-29.canonical | 4 + _test/data/spec-09-29.data | 4 + _test/data/spec-09-30.canonical | 7 + _test/data/spec-09-30.data | 14 + _test/data/spec-09-31.canonical | 7 + _test/data/spec-09-31.data | 14 + _test/data/spec-09-32.canonical | 7 + _test/data/spec-09-32.data | 14 + _test/data/spec-09-33.canonical | 7 + _test/data/spec-09-33.data | 14 + _test/data/spec-10-01.canonical | 12 + _test/data/spec-10-01.data | 2 + _test/data/spec-10-02.canonical | 14 + _test/data/spec-10-02.data | 8 + _test/data/spec-10-03.canonical | 12 + _test/data/spec-10-03.data | 4 + _test/data/spec-10-04.canonical | 11 + _test/data/spec-10-04.data | 4 + _test/data/spec-10-05.canonical | 14 + _test/data/spec-10-05.data | 7 + _test/data/spec-10-06.canonical | 16 + _test/data/spec-10-06.data | 2 + _test/data/spec-10-07.canonical | 16 + _test/data/spec-10-07.data | 7 + _test/data/spec-10-08.data | 5 + _test/data/spec-10-08.error | 5 + _test/data/spec-10-09.canonical | 8 + 
_test/data/spec-10-09.data | 4 + _test/data/spec-10-10.canonical | 16 + _test/data/spec-10-10.data | 8 + _test/data/spec-10-11.canonical | 24 + _test/data/spec-10-11.data | 7 + _test/data/spec-10-12.canonical | 9 + _test/data/spec-10-12.data | 3 + _test/data/spec-10-13.canonical | 11 + _test/data/spec-10-13.data | 5 + _test/data/spec-10-14.canonical | 11 + _test/data/spec-10-14.data | 4 + _test/data/spec-10-15.canonical | 18 + _test/data/spec-10-15.data | 3 + _test/data/str.data | 1 + _test/data/str.detect | 1 + _test/data/tags.events | 12 + _test/data/test_mark.marks | 38 + _test/data/timestamp-bugs.code | 8 + _test/data/timestamp-bugs.data | 6 + _test/data/timestamp.data | 5 + _test/data/timestamp.detect | 1 + _test/data/unclosed-bracket.loader-error | 6 + _test/data/unclosed-quoted-scalar.loader-error | 2 + _test/data/undefined-anchor.loader-error | 3 + _test/data/undefined-constructor.loader-error | 1 + _test/data/undefined-tag-handle.loader-error | 1 + _test/data/unknown.dumper-error | 1 + _test/data/unsupported-version.emitter-error | 5 + _test/data/utf16be.code | 1 + _test/data/utf16be.data | Bin 0 -> 30 bytes _test/data/utf16le.code | 1 + _test/data/utf16le.data | Bin 0 -> 30 bytes _test/data/utf8-implicit.code | 1 + _test/data/utf8-implicit.data | 1 + _test/data/utf8.code | 1 + _test/data/utf8.data | 1 + _test/data/util/00_ok.yaml | 3 + _test/data/util/01_second_rt_ok.yaml | 3 + _test/data/util/02_not_ok.yaml | 2 + _test/data/util/03_no_comment_ok.yaml | 2 + _test/data/valid_escape_characters.code | 1 + _test/data/valid_escape_characters.data | 1 + _test/data/valid_escape_characters.skip-ext | 0 _test/data/value.data | 1 + _test/data/value.detect | 1 + _test/data/yaml.data | 3 + _test/data/yaml.detect | 1 + _test/lib/canonical.py | 387 +++ _test/lib/test_all.py | 21 + _test/lib/test_appliance.py | 205 ++ _test/lib/test_build.py | 15 + _test/lib/test_build_ext.py | 15 + _test/lib/test_canonical.py | 55 + _test/lib/test_constructor.py | 393 +++ 
_test/lib/test_emitter.py | 145 ++ _test/lib/test_errors.py | 100 + _test/lib/test_input_output.py | 190 ++ _test/lib/test_mark.py | 40 + _test/lib/test_reader.py | 49 + _test/lib/test_recursive.py | 63 + _test/lib/test_representer.py | 59 + _test/lib/test_resolver.py | 117 + _test/lib/test_structure.py | 234 ++ _test/lib/test_tokens.py | 93 + _test/lib/test_yaml.py | 21 + _test/lib/test_yaml_ext.py | 418 ++++ _test/roundtrip.py | 346 +++ _test/test_a_dedent.py | 57 + _test/test_add_xxx.py | 184 ++ _test/test_anchor.py | 608 +++++ _test/test_api_change.py | 230 ++ _test/test_class_register.py | 141 ++ _test/test_collections.py | 19 + _test/test_comment_manipulation.py | 721 ++++++ _test/test_comments.py | 964 ++++++++ _test/test_contextmanager.py | 116 + _test/test_copy.py | 135 ++ _test/test_cyaml.py | 97 + _test/test_datetime.py | 158 ++ _test/test_deprecation.py | 14 + _test/test_documents.py | 75 + _test/test_fail.py | 255 ++ _test/test_float.py | 90 + _test/test_flowsequencekey.py | 25 + _test/test_indentation.py | 352 +++ _test/test_int.py | 34 + _test/test_issues.py | 957 ++++++++ _test/test_json_numbers.py | 56 + _test/test_line_col.py | 104 + _test/test_literal.py | 335 +++ _test/test_none.py | 42 + _test/test_numpy.py | 22 + _test/test_program_config.py | 65 + _test/test_spec_examples.py | 337 +++ _test/test_string.py | 228 ++ _test/test_tag.py | 171 ++ _test/test_version.py | 177 ++ _test/test_yamlfile.py | 229 ++ _test/test_yamlobject.py | 82 + _test/test_z_check_debug_leftovers.py | 40 + _test/test_z_data.py | 272 +++ _test/test_z_olddata.py | 42 + lib/ruyaml/__init__.py | 58 + lib/ruyaml/anchor.py | 20 + lib/ruyaml/comments.py | 1280 ++++++++++ lib/ruyaml/compat.py | 263 +++ lib/ruyaml/composer.py | 242 ++ lib/ruyaml/configobjwalker.py | 14 + lib/ruyaml/constructor.py | 1920 +++++++++++++++ lib/ruyaml/cyaml.py | 191 ++ lib/ruyaml/dumper.py | 225 ++ lib/ruyaml/emitter.py | 1797 ++++++++++++++ lib/ruyaml/error.py | 334 +++ lib/ruyaml/events.py | 201 ++ 
lib/ruyaml/loader.py | 78 + lib/ruyaml/main.py | 1702 +++++++++++++ lib/ruyaml/nodes.py | 146 ++ lib/ruyaml/parser.py | 938 ++++++++ lib/ruyaml/py.typed | 0 lib/ruyaml/reader.py | 315 +++ lib/ruyaml/representer.py | 1197 ++++++++++ lib/ruyaml/resolver.py | 421 ++++ lib/ruyaml/scalarbool.py | 47 + lib/ruyaml/scalarfloat.py | 135 ++ lib/ruyaml/scalarint.py | 137 ++ lib/ruyaml/scalarstring.py | 152 ++ lib/ruyaml/scanner.py | 2491 ++++++++++++++++++++ lib/ruyaml/serializer.py | 251 ++ lib/ruyaml/timestamp.py | 65 + lib/ruyaml/tokens.py | 413 ++++ lib/ruyaml/util.py | 247 ++ pyproject.toml | 27 + setup.cfg | 76 + tox.ini | 67 + 680 files changed, 33297 insertions(+) create mode 100644 .github/CODEOWNERS create mode 100644 .github/release-drafter.yml create mode 100644 .github/workflows/ack.yml create mode 100644 .github/workflows/push.yml create mode 100644 .github/workflows/release.yml create mode 100644 .github/workflows/tox.yml create mode 100644 .gitignore create mode 100644 .hgignore create mode 100644 .pre-commit-config.yaml create mode 100644 .readthedocs.yaml create mode 100644 CHANGES create mode 100644 LICENSE create mode 100644 MANIFEST.in create mode 100644 README.rst create mode 100644 _doc/Makefile create mode 100644 _doc/_static/license.svg create mode 100644 _doc/_static/pypi.svg create mode 100644 _doc/api.rst create mode 100644 _doc/basicuse.rst create mode 100644 _doc/conf.py create mode 100644 _doc/contributing.rst create mode 100644 _doc/contributing.ryd create mode 100644 _doc/detail.rst create mode 100644 _doc/dumpcls.rst create mode 100644 _doc/dumpcls.ryd create mode 100644 _doc/example.rst create mode 100644 _doc/index.rst create mode 100644 _doc/index.ryd create mode 100644 _doc/install.rst create mode 100644 _doc/overview.rst create mode 100644 _doc/pyyaml.rst create mode 100644 _doc/upmerge.rst create mode 100644 _test/__init__.py create mode 100644 _test/data/a-nasty-libyaml-bug.loader-error create mode 100644 
_test/data/aliases-cdumper-bug.code create mode 100644 _test/data/aliases.events create mode 100644 _test/data/bool.data create mode 100644 _test/data/bool.detect create mode 100644 _test/data/colon-in-flow-context.loader-error create mode 100644 _test/data/comment_no_eol.data create mode 100644 _test/data/composite_key.code create mode 100644 _test/data/composite_key.data create mode 100644 _test/data/construct-binary-py3.code create mode 100644 _test/data/construct-binary-py3.data create mode 100644 _test/data/construct-bool.code create mode 100644 _test/data/construct-bool.data create mode 100644 _test/data/construct-custom.code create mode 100644 _test/data/construct-custom.data create mode 100644 _test/data/construct-float.code create mode 100644 _test/data/construct-float.data create mode 100644 _test/data/construct-int.code create mode 100644 _test/data/construct-int.data create mode 100644 _test/data/construct-map.code create mode 100644 _test/data/construct-map.data create mode 100644 _test/data/construct-merge.code create mode 100644 _test/data/construct-merge.data create mode 100644 _test/data/construct-null.code create mode 100644 _test/data/construct-null.data create mode 100644 _test/data/construct-omap.code create mode 100644 _test/data/construct-omap.data create mode 100644 _test/data/construct-pairs.code create mode 100644 _test/data/construct-pairs.data create mode 100644 _test/data/construct-python-bool.code create mode 100644 _test/data/construct-python-bool.data create mode 100644 _test/data/construct-python-bytes-py3.code create mode 100644 _test/data/construct-python-bytes-py3.data create mode 100644 _test/data/construct-python-complex.code create mode 100644 _test/data/construct-python-complex.data create mode 100644 _test/data/construct-python-float.code create mode 100644 _test/data/construct-python-float.data create mode 100644 _test/data/construct-python-int.code create mode 100644 _test/data/construct-python-int.data create mode 100644 
_test/data/construct-python-long-short-py3.code create mode 100644 _test/data/construct-python-long-short-py3.data create mode 100644 _test/data/construct-python-name-module.code create mode 100644 _test/data/construct-python-name-module.data create mode 100644 _test/data/construct-python-none.code create mode 100644 _test/data/construct-python-none.data create mode 100644 _test/data/construct-python-object.code create mode 100644 _test/data/construct-python-object.data create mode 100644 _test/data/construct-python-str-ascii.code create mode 100644 _test/data/construct-python-str-ascii.data create mode 100644 _test/data/construct-python-str-utf8-py2.code create mode 100644 _test/data/construct-python-str-utf8-py3.code create mode 100644 _test/data/construct-python-str-utf8-py3.data create mode 100644 _test/data/construct-python-tuple-list-dict.code create mode 100644 _test/data/construct-python-tuple-list-dict.data create mode 100644 _test/data/construct-python-unicode-ascii-py3.code create mode 100644 _test/data/construct-python-unicode-ascii-py3.data create mode 100644 _test/data/construct-python-unicode-utf8-py2.code create mode 100644 _test/data/construct-python-unicode-utf8-py3.code create mode 100644 _test/data/construct-python-unicode-utf8-py3.data create mode 100644 _test/data/construct-seq.code create mode 100644 _test/data/construct-seq.data create mode 100644 _test/data/construct-set.code create mode 100644 _test/data/construct-set.data create mode 100644 _test/data/construct-str-ascii.code create mode 100644 _test/data/construct-str-ascii.data create mode 100644 _test/data/construct-str-utf8-py2.code create mode 100644 _test/data/construct-str-utf8-py3.code create mode 100644 _test/data/construct-str-utf8-py3.data create mode 100644 _test/data/construct-str.code create mode 100644 _test/data/construct-str.data create mode 100644 _test/data/construct-timestamp.code create mode 100644 _test/data/construct-timestamp.data create mode 100644 
_test/data/construct-value.code create mode 100644 _test/data/construct-value.data create mode 100644 _test/data/document-separator-in-quoted-scalar.loader-error create mode 100644 _test/data/documents.events create mode 100644 _test/data/duplicate-anchor-1.loader-warning create mode 100644 _test/data/duplicate-anchor-2.loader-warning create mode 100644 _test/data/duplicate-merge-key.former-loader-error.code create mode 100644 _test/data/duplicate-tag-directive.loader-error create mode 100644 _test/data/duplicate-yaml-directive.loader-error create mode 100644 _test/data/emit-block-scalar-in-simple-key-context-bug.canonical create mode 100644 _test/data/emit-block-scalar-in-simple-key-context-bug.data create mode 100644 _test/data/emitting-unacceptable-unicode-character-bug-py3.code create mode 100644 _test/data/emitting-unacceptable-unicode-character-bug-py3.data create mode 100644 _test/data/emitting-unacceptable-unicode-character-bug-py3.skip-ext create mode 100644 _test/data/empty-anchor.emitter-error create mode 100644 _test/data/empty-document-bug.canonical create mode 100644 _test/data/empty-document-bug.data create mode 100644 _test/data/empty-document-bug.empty create mode 100644 _test/data/empty-documents.single-loader-error create mode 100644 _test/data/empty-python-module.loader-error create mode 100644 _test/data/empty-python-name.loader-error create mode 100644 _test/data/empty-tag-handle.emitter-error create mode 100644 _test/data/empty-tag-prefix.emitter-error create mode 100644 _test/data/empty-tag.emitter-error create mode 100644 _test/data/expected-document-end.emitter-error create mode 100644 _test/data/expected-document-start.emitter-error create mode 100644 _test/data/expected-mapping.loader-error create mode 100644 _test/data/expected-node-1.emitter-error create mode 100644 _test/data/expected-node-2.emitter-error create mode 100644 _test/data/expected-nothing.emitter-error create mode 100644 _test/data/expected-scalar.loader-error create mode 
100644 _test/data/expected-sequence.loader-error create mode 100644 _test/data/expected-stream-start.emitter-error create mode 100644 _test/data/explicit-document.single-loader-error create mode 100644 _test/data/fetch-complex-value-bug.loader-error create mode 100644 _test/data/float-representer-2.3-bug.code create mode 100644 _test/data/float-representer-2.3-bug.data create mode 100644 _test/data/float.data create mode 100644 _test/data/float.detect create mode 100644 _test/data/forbidden-entry.loader-error create mode 100644 _test/data/forbidden-key.loader-error create mode 100644 _test/data/forbidden-value.loader-error create mode 100644 _test/data/implicit-document.single-loader-error create mode 100644 _test/data/int.data create mode 100644 _test/data/int.detect create mode 100644 _test/data/invalid-anchor-1.loader-error create mode 100644 _test/data/invalid-anchor-2.loader-error create mode 100644 _test/data/invalid-anchor.emitter-error create mode 100644 _test/data/invalid-base64-data-2.loader-error create mode 100644 _test/data/invalid-base64-data.loader-error create mode 100644 _test/data/invalid-block-scalar-indicator.loader-error create mode 100644 _test/data/invalid-character.loader-error create mode 100644 _test/data/invalid-character.stream-error create mode 100644 _test/data/invalid-directive-line.loader-error create mode 100644 _test/data/invalid-directive-name-1.loader-error create mode 100644 _test/data/invalid-directive-name-2.loader-error create mode 100644 _test/data/invalid-escape-character.loader-error create mode 100644 _test/data/invalid-escape-numbers.loader-error create mode 100644 _test/data/invalid-indentation-indicator-1.loader-error create mode 100644 _test/data/invalid-indentation-indicator-2.loader-error create mode 100644 _test/data/invalid-item-without-trailing-break.loader-error create mode 100644 _test/data/invalid-merge-1.loader-error create mode 100644 _test/data/invalid-merge-2.loader-error create mode 100644 
_test/data/invalid-omap-1.loader-error create mode 100644 _test/data/invalid-omap-2.loader-error create mode 100644 _test/data/invalid-omap-3.loader-error create mode 100644 _test/data/invalid-pairs-1.loader-error create mode 100644 _test/data/invalid-pairs-2.loader-error create mode 100644 _test/data/invalid-pairs-3.loader-error create mode 100644 _test/data/invalid-python-bytes-2-py3.loader-error create mode 100644 _test/data/invalid-python-bytes-py3.loader-error create mode 100644 _test/data/invalid-python-module-kind.loader-error create mode 100644 _test/data/invalid-python-module-value.loader-error create mode 100644 _test/data/invalid-python-module.loader-error create mode 100644 _test/data/invalid-python-name-kind.loader-error create mode 100644 _test/data/invalid-python-name-module-2.loader-error create mode 100644 _test/data/invalid-python-name-module.loader-error create mode 100644 _test/data/invalid-python-name-object.loader-error create mode 100644 _test/data/invalid-python-name-value.loader-error create mode 100644 _test/data/invalid-simple-key.loader-error create mode 100644 _test/data/invalid-single-quote-bug.code create mode 100644 _test/data/invalid-single-quote-bug.data create mode 100644 _test/data/invalid-starting-character.loader-error create mode 100644 _test/data/invalid-tag-1.loader-error create mode 100644 _test/data/invalid-tag-2.loader-error create mode 100644 _test/data/invalid-tag-directive-handle.loader-error create mode 100644 _test/data/invalid-tag-directive-prefix.loader-error create mode 100644 _test/data/invalid-tag-handle-1.emitter-error create mode 100644 _test/data/invalid-tag-handle-1.loader-error create mode 100644 _test/data/invalid-tag-handle-2.emitter-error create mode 100644 _test/data/invalid-tag-handle-2.loader-error create mode 100644 _test/data/invalid-uri-escapes-1.loader-error create mode 100644 _test/data/invalid-uri-escapes-2.loader-error create mode 100644 _test/data/invalid-uri-escapes-3.loader-error create mode 
100644 _test/data/invalid-uri.loader-error create mode 100644 _test/data/invalid-utf8-byte.loader-error create mode 100644 _test/data/invalid-utf8-byte.stream-error create mode 100644 _test/data/invalid-yaml-directive-version-1.loader-error create mode 100644 _test/data/invalid-yaml-directive-version-2.loader-error create mode 100644 _test/data/invalid-yaml-directive-version-3.loader-error create mode 100644 _test/data/invalid-yaml-directive-version-4.loader-error create mode 100644 _test/data/invalid-yaml-directive-version-5.loader-error create mode 100644 _test/data/invalid-yaml-directive-version-6.loader-error create mode 100644 _test/data/invalid-yaml-version.loader-error create mode 100644 _test/data/latin.unicode create mode 100644 _test/data/mappings.events create mode 100644 _test/data/merge.data create mode 100644 _test/data/merge.detect create mode 100644 _test/data/more-floats.code create mode 100644 _test/data/more-floats.data create mode 100644 _test/data/negative-float-bug.code create mode 100644 _test/data/negative-float-bug.data create mode 100644 _test/data/no-alias-anchor.emitter-error create mode 100644 _test/data/no-alias-anchor.skip-ext create mode 100644 _test/data/no-block-collection-end.loader-error create mode 100644 _test/data/no-block-mapping-end-2.loader-error create mode 100644 _test/data/no-block-mapping-end.loader-error create mode 100644 _test/data/no-document-start.loader-error create mode 100644 _test/data/no-flow-mapping-end.loader-error create mode 100644 _test/data/no-flow-sequence-end.loader-error create mode 100644 _test/data/no-node-1.loader-error create mode 100644 _test/data/no-node-2.loader-error create mode 100644 _test/data/no-tag.emitter-error create mode 100644 _test/data/null.data create mode 100644 _test/data/null.detect create mode 100644 _test/data/odd-utf16.stream-error create mode 100644 _test/data/omap.data create mode 100644 _test/data/omap.roundtrip create mode 100644 
_test/data/recursive-anchor.former-loader-error create mode 100644 _test/data/recursive-dict.recursive create mode 100644 _test/data/recursive-list.recursive create mode 100644 _test/data/recursive-set.recursive create mode 100644 _test/data/recursive-state.recursive create mode 100644 _test/data/recursive-tuple.recursive create mode 100644 _test/data/recursive.former-dumper-error create mode 100644 _test/data/remove-possible-simple-key-bug.loader-error create mode 100644 _test/data/resolver.data create mode 100644 _test/data/resolver.path create mode 100644 _test/data/run-parser-crash-bug.data create mode 100644 _test/data/scalars.events create mode 100644 _test/data/scan-document-end-bug.canonical create mode 100644 _test/data/scan-document-end-bug.data create mode 100644 _test/data/scan-line-break-bug.canonical create mode 100644 _test/data/scan-line-break-bug.data create mode 100644 _test/data/sequences.events create mode 100644 _test/data/serializer-is-already-opened.dumper-error create mode 100644 _test/data/serializer-is-closed-1.dumper-error create mode 100644 _test/data/serializer-is-closed-2.dumper-error create mode 100644 _test/data/serializer-is-not-opened-1.dumper-error create mode 100644 _test/data/serializer-is-not-opened-2.dumper-error create mode 100644 _test/data/single-dot-is-not-float-bug.code create mode 100644 _test/data/single-dot-is-not-float-bug.data create mode 100644 _test/data/sloppy-indentation.canonical create mode 100644 _test/data/sloppy-indentation.data create mode 100644 _test/data/spec-02-01.code create mode 100644 _test/data/spec-02-01.data create mode 100644 _test/data/spec-02-01.structure create mode 100644 _test/data/spec-02-01.tokens create mode 100644 _test/data/spec-02-02.data create mode 100644 _test/data/spec-02-02.structure create mode 100644 _test/data/spec-02-02.tokens create mode 100644 _test/data/spec-02-03.data create mode 100644 _test/data/spec-02-03.structure create mode 100644 _test/data/spec-02-03.tokens create 
mode 100644 _test/data/spec-02-04.data create mode 100644 _test/data/spec-02-04.structure create mode 100644 _test/data/spec-02-04.tokens create mode 100644 _test/data/spec-02-05.data create mode 100644 _test/data/spec-02-05.structure create mode 100644 _test/data/spec-02-05.tokens create mode 100644 _test/data/spec-02-06.data create mode 100644 _test/data/spec-02-06.structure create mode 100644 _test/data/spec-02-06.tokens create mode 100644 _test/data/spec-02-07.data create mode 100644 _test/data/spec-02-07.structure create mode 100644 _test/data/spec-02-07.tokens create mode 100644 _test/data/spec-02-08.data create mode 100644 _test/data/spec-02-08.structure create mode 100644 _test/data/spec-02-08.tokens create mode 100644 _test/data/spec-02-09.data create mode 100644 _test/data/spec-02-09.structure create mode 100644 _test/data/spec-02-09.tokens create mode 100644 _test/data/spec-02-10.data create mode 100644 _test/data/spec-02-10.structure create mode 100644 _test/data/spec-02-10.tokens create mode 100644 _test/data/spec-02-11.code create mode 100644 _test/data/spec-02-11.data create mode 100644 _test/data/spec-02-11.structure create mode 100644 _test/data/spec-02-11.tokens create mode 100644 _test/data/spec-02-12.data create mode 100644 _test/data/spec-02-12.structure create mode 100644 _test/data/spec-02-12.tokens create mode 100644 _test/data/spec-02-13.data create mode 100644 _test/data/spec-02-13.structure create mode 100644 _test/data/spec-02-13.tokens create mode 100644 _test/data/spec-02-14.data create mode 100644 _test/data/spec-02-14.structure create mode 100644 _test/data/spec-02-14.tokens create mode 100644 _test/data/spec-02-15.data create mode 100644 _test/data/spec-02-15.structure create mode 100644 _test/data/spec-02-15.tokens create mode 100644 _test/data/spec-02-16.data create mode 100644 _test/data/spec-02-16.structure create mode 100644 _test/data/spec-02-16.tokens create mode 100644 _test/data/spec-02-17.data create mode 100644 
_test/data/spec-02-17.structure create mode 100644 _test/data/spec-02-17.tokens create mode 100644 _test/data/spec-02-18.data create mode 100644 _test/data/spec-02-18.structure create mode 100644 _test/data/spec-02-18.tokens create mode 100644 _test/data/spec-02-19.data create mode 100644 _test/data/spec-02-19.structure create mode 100644 _test/data/spec-02-19.tokens create mode 100644 _test/data/spec-02-20.data create mode 100644 _test/data/spec-02-20.structure create mode 100644 _test/data/spec-02-20.tokens create mode 100644 _test/data/spec-02-21.data create mode 100644 _test/data/spec-02-21.structure create mode 100644 _test/data/spec-02-21.tokens create mode 100644 _test/data/spec-02-22.data create mode 100644 _test/data/spec-02-22.structure create mode 100644 _test/data/spec-02-22.tokens create mode 100644 _test/data/spec-02-23.data create mode 100644 _test/data/spec-02-23.structure create mode 100644 _test/data/spec-02-23.tokens create mode 100644 _test/data/spec-02-24.data create mode 100644 _test/data/spec-02-24.structure create mode 100644 _test/data/spec-02-24.tokens create mode 100644 _test/data/spec-02-25.data create mode 100644 _test/data/spec-02-25.structure create mode 100644 _test/data/spec-02-25.tokens create mode 100644 _test/data/spec-02-26.data create mode 100644 _test/data/spec-02-26.structure create mode 100644 _test/data/spec-02-26.tokens create mode 100644 _test/data/spec-02-27.data create mode 100644 _test/data/spec-02-27.structure create mode 100644 _test/data/spec-02-27.tokens create mode 100644 _test/data/spec-02-28.data create mode 100644 _test/data/spec-02-28.structure create mode 100644 _test/data/spec-02-28.tokens create mode 100644 _test/data/spec-05-01-utf16be.data create mode 100644 _test/data/spec-05-01-utf16be.empty create mode 100644 _test/data/spec-05-01-utf16le.data create mode 100644 _test/data/spec-05-01-utf16le.empty create mode 100644 _test/data/spec-05-01-utf8.data create mode 100644 _test/data/spec-05-01-utf8.empty 
create mode 100644 _test/data/spec-05-02-utf16be.data create mode 100644 _test/data/spec-05-02-utf16be.error create mode 100644 _test/data/spec-05-02-utf16le.data create mode 100644 _test/data/spec-05-02-utf16le.error create mode 100644 _test/data/spec-05-02-utf8.data create mode 100644 _test/data/spec-05-02-utf8.error create mode 100644 _test/data/spec-05-03.canonical create mode 100644 _test/data/spec-05-03.data create mode 100644 _test/data/spec-05-04.canonical create mode 100644 _test/data/spec-05-04.data create mode 100644 _test/data/spec-05-05.data create mode 100644 _test/data/spec-05-05.empty create mode 100644 _test/data/spec-05-06.canonical create mode 100644 _test/data/spec-05-06.data create mode 100644 _test/data/spec-05-07.canonical create mode 100644 _test/data/spec-05-07.data create mode 100644 _test/data/spec-05-08.canonical create mode 100644 _test/data/spec-05-08.data create mode 100644 _test/data/spec-05-09.canonical create mode 100644 _test/data/spec-05-09.data create mode 100644 _test/data/spec-05-10.data create mode 100644 _test/data/spec-05-10.error create mode 100644 _test/data/spec-05-11.canonical create mode 100644 _test/data/spec-05-11.data create mode 100644 _test/data/spec-05-12.data create mode 100644 _test/data/spec-05-12.error create mode 100644 _test/data/spec-05-13.canonical create mode 100644 _test/data/spec-05-13.data create mode 100644 _test/data/spec-05-14.canonical create mode 100644 _test/data/spec-05-14.data create mode 100644 _test/data/spec-05-15.data create mode 100644 _test/data/spec-05-15.error create mode 100644 _test/data/spec-06-01.canonical create mode 100644 _test/data/spec-06-01.data create mode 100644 _test/data/spec-06-02.data create mode 100644 _test/data/spec-06-02.empty create mode 100644 _test/data/spec-06-03.canonical create mode 100644 _test/data/spec-06-03.data create mode 100644 _test/data/spec-06-04.canonical create mode 100644 _test/data/spec-06-04.data create mode 100644 
_test/data/spec-06-05.canonical create mode 100644 _test/data/spec-06-05.data create mode 100644 _test/data/spec-06-06.canonical create mode 100644 _test/data/spec-06-06.data create mode 100644 _test/data/spec-06-07.canonical create mode 100644 _test/data/spec-06-07.data create mode 100644 _test/data/spec-06-08.canonical create mode 100644 _test/data/spec-06-08.data create mode 100644 _test/data/spec-07-01.canonical create mode 100644 _test/data/spec-07-01.data create mode 100644 _test/data/spec-07-01.skip-ext create mode 100644 _test/data/spec-07-02.canonical create mode 100644 _test/data/spec-07-02.data create mode 100644 _test/data/spec-07-02.skip-ext create mode 100644 _test/data/spec-07-03.data create mode 100644 _test/data/spec-07-03.error create mode 100644 _test/data/spec-07-04.canonical create mode 100644 _test/data/spec-07-04.data create mode 100644 _test/data/spec-07-05.data create mode 100644 _test/data/spec-07-05.error create mode 100644 _test/data/spec-07-06.canonical create mode 100644 _test/data/spec-07-06.data create mode 100644 _test/data/spec-07-07a.canonical create mode 100644 _test/data/spec-07-07a.data create mode 100644 _test/data/spec-07-07b.canonical create mode 100644 _test/data/spec-07-07b.data create mode 100644 _test/data/spec-07-08.canonical create mode 100644 _test/data/spec-07-08.data create mode 100644 _test/data/spec-07-09.canonical create mode 100644 _test/data/spec-07-09.data create mode 100644 _test/data/spec-07-10.canonical create mode 100644 _test/data/spec-07-10.data create mode 100644 _test/data/spec-07-11.data create mode 100644 _test/data/spec-07-11.empty create mode 100644 _test/data/spec-07-12a.canonical create mode 100644 _test/data/spec-07-12a.data create mode 100644 _test/data/spec-07-12b.canonical create mode 100644 _test/data/spec-07-12b.data create mode 100644 _test/data/spec-07-13.canonical create mode 100644 _test/data/spec-07-13.data create mode 100644 _test/data/spec-08-01.canonical create mode 100644 
_test/data/spec-08-01.data create mode 100644 _test/data/spec-08-02.canonical create mode 100644 _test/data/spec-08-02.data create mode 100644 _test/data/spec-08-03.canonical create mode 100644 _test/data/spec-08-03.data create mode 100644 _test/data/spec-08-04.data create mode 100644 _test/data/spec-08-04.error create mode 100644 _test/data/spec-08-05.canonical create mode 100644 _test/data/spec-08-05.data create mode 100644 _test/data/spec-08-06.data create mode 100644 _test/data/spec-08-06.error create mode 100644 _test/data/spec-08-07.canonical create mode 100644 _test/data/spec-08-07.data create mode 100644 _test/data/spec-08-08.canonical create mode 100644 _test/data/spec-08-08.data create mode 100644 _test/data/spec-08-09.canonical create mode 100644 _test/data/spec-08-09.data create mode 100644 _test/data/spec-08-10.canonical create mode 100644 _test/data/spec-08-10.data create mode 100644 _test/data/spec-08-11.canonical create mode 100644 _test/data/spec-08-11.data create mode 100644 _test/data/spec-08-12.canonical create mode 100644 _test/data/spec-08-12.data create mode 100644 _test/data/spec-08-13.canonical create mode 100644 _test/data/spec-08-13.data create mode 100644 _test/data/spec-08-13.skip-ext create mode 100644 _test/data/spec-08-14.canonical create mode 100644 _test/data/spec-08-14.data create mode 100644 _test/data/spec-08-15.canonical create mode 100644 _test/data/spec-08-15.data create mode 100644 _test/data/spec-09-01.canonical create mode 100644 _test/data/spec-09-01.data create mode 100644 _test/data/spec-09-02.canonical create mode 100644 _test/data/spec-09-02.data create mode 100644 _test/data/spec-09-03.canonical create mode 100644 _test/data/spec-09-03.data create mode 100644 _test/data/spec-09-04.canonical create mode 100644 _test/data/spec-09-04.data create mode 100644 _test/data/spec-09-05.canonical create mode 100644 _test/data/spec-09-05.data create mode 100644 _test/data/spec-09-06.canonical create mode 100644 
_test/data/spec-09-06.data create mode 100644 _test/data/spec-09-07.canonical create mode 100644 _test/data/spec-09-07.data create mode 100644 _test/data/spec-09-08.canonical create mode 100644 _test/data/spec-09-08.data create mode 100644 _test/data/spec-09-09.canonical create mode 100644 _test/data/spec-09-09.data create mode 100644 _test/data/spec-09-10.canonical create mode 100644 _test/data/spec-09-10.data create mode 100644 _test/data/spec-09-11.canonical create mode 100644 _test/data/spec-09-11.data create mode 100644 _test/data/spec-09-12.canonical create mode 100644 _test/data/spec-09-12.data create mode 100644 _test/data/spec-09-13.canonical create mode 100644 _test/data/spec-09-13.data create mode 100644 _test/data/spec-09-14.data create mode 100644 _test/data/spec-09-14.error create mode 100644 _test/data/spec-09-15.canonical create mode 100644 _test/data/spec-09-15.data create mode 100644 _test/data/spec-09-16.canonical create mode 100644 _test/data/spec-09-16.data create mode 100644 _test/data/spec-09-17.canonical create mode 100644 _test/data/spec-09-17.data create mode 100644 _test/data/spec-09-18.canonical create mode 100644 _test/data/spec-09-18.data create mode 100644 _test/data/spec-09-19.canonical create mode 100644 _test/data/spec-09-19.data create mode 100644 _test/data/spec-09-20.canonical create mode 100644 _test/data/spec-09-20.data create mode 100644 _test/data/spec-09-20.skip-ext create mode 100644 _test/data/spec-09-21.data create mode 100644 _test/data/spec-09-21.error create mode 100644 _test/data/spec-09-22.canonical create mode 100644 _test/data/spec-09-22.data create mode 100644 _test/data/spec-09-23.canonical create mode 100644 _test/data/spec-09-23.data create mode 100644 _test/data/spec-09-24.canonical create mode 100644 _test/data/spec-09-24.data create mode 100644 _test/data/spec-09-25.canonical create mode 100644 _test/data/spec-09-25.data create mode 100644 _test/data/spec-09-26.canonical create mode 100644 
_test/data/spec-09-26.data create mode 100644 _test/data/spec-09-27.canonical create mode 100644 _test/data/spec-09-27.data create mode 100644 _test/data/spec-09-28.canonical create mode 100644 _test/data/spec-09-28.data create mode 100644 _test/data/spec-09-29.canonical create mode 100644 _test/data/spec-09-29.data create mode 100644 _test/data/spec-09-30.canonical create mode 100644 _test/data/spec-09-30.data create mode 100644 _test/data/spec-09-31.canonical create mode 100644 _test/data/spec-09-31.data create mode 100644 _test/data/spec-09-32.canonical create mode 100644 _test/data/spec-09-32.data create mode 100644 _test/data/spec-09-33.canonical create mode 100644 _test/data/spec-09-33.data create mode 100644 _test/data/spec-10-01.canonical create mode 100644 _test/data/spec-10-01.data create mode 100644 _test/data/spec-10-02.canonical create mode 100644 _test/data/spec-10-02.data create mode 100644 _test/data/spec-10-03.canonical create mode 100644 _test/data/spec-10-03.data create mode 100644 _test/data/spec-10-04.canonical create mode 100644 _test/data/spec-10-04.data create mode 100644 _test/data/spec-10-05.canonical create mode 100644 _test/data/spec-10-05.data create mode 100644 _test/data/spec-10-06.canonical create mode 100644 _test/data/spec-10-06.data create mode 100644 _test/data/spec-10-07.canonical create mode 100644 _test/data/spec-10-07.data create mode 100644 _test/data/spec-10-08.data create mode 100644 _test/data/spec-10-08.error create mode 100644 _test/data/spec-10-09.canonical create mode 100644 _test/data/spec-10-09.data create mode 100644 _test/data/spec-10-10.canonical create mode 100644 _test/data/spec-10-10.data create mode 100644 _test/data/spec-10-11.canonical create mode 100644 _test/data/spec-10-11.data create mode 100644 _test/data/spec-10-12.canonical create mode 100644 _test/data/spec-10-12.data create mode 100644 _test/data/spec-10-13.canonical create mode 100644 _test/data/spec-10-13.data create mode 100644 
_test/data/spec-10-14.canonical create mode 100644 _test/data/spec-10-14.data create mode 100644 _test/data/spec-10-15.canonical create mode 100644 _test/data/spec-10-15.data create mode 100644 _test/data/str.data create mode 100644 _test/data/str.detect create mode 100644 _test/data/tags.events create mode 100644 _test/data/test_mark.marks create mode 100644 _test/data/timestamp-bugs.code create mode 100644 _test/data/timestamp-bugs.data create mode 100644 _test/data/timestamp.data create mode 100644 _test/data/timestamp.detect create mode 100644 _test/data/unclosed-bracket.loader-error create mode 100644 _test/data/unclosed-quoted-scalar.loader-error create mode 100644 _test/data/undefined-anchor.loader-error create mode 100644 _test/data/undefined-constructor.loader-error create mode 100644 _test/data/undefined-tag-handle.loader-error create mode 100644 _test/data/unknown.dumper-error create mode 100644 _test/data/unsupported-version.emitter-error create mode 100644 _test/data/utf16be.code create mode 100644 _test/data/utf16be.data create mode 100644 _test/data/utf16le.code create mode 100644 _test/data/utf16le.data create mode 100644 _test/data/utf8-implicit.code create mode 100644 _test/data/utf8-implicit.data create mode 100644 _test/data/utf8.code create mode 100644 _test/data/utf8.data create mode 100644 _test/data/util/00_ok.yaml create mode 100644 _test/data/util/01_second_rt_ok.yaml create mode 100644 _test/data/util/02_not_ok.yaml create mode 100644 _test/data/util/03_no_comment_ok.yaml create mode 100644 _test/data/valid_escape_characters.code create mode 100644 _test/data/valid_escape_characters.data create mode 100644 _test/data/valid_escape_characters.skip-ext create mode 100644 _test/data/value.data create mode 100644 _test/data/value.detect create mode 100644 _test/data/yaml.data create mode 100644 _test/data/yaml.detect create mode 100644 _test/lib/canonical.py create mode 100644 _test/lib/test_all.py create mode 100644 
_test/lib/test_appliance.py create mode 100644 _test/lib/test_build.py create mode 100644 _test/lib/test_build_ext.py create mode 100644 _test/lib/test_canonical.py create mode 100644 _test/lib/test_constructor.py create mode 100644 _test/lib/test_emitter.py create mode 100644 _test/lib/test_errors.py create mode 100644 _test/lib/test_input_output.py create mode 100644 _test/lib/test_mark.py create mode 100644 _test/lib/test_reader.py create mode 100644 _test/lib/test_recursive.py create mode 100644 _test/lib/test_representer.py create mode 100644 _test/lib/test_resolver.py create mode 100644 _test/lib/test_structure.py create mode 100644 _test/lib/test_tokens.py create mode 100644 _test/lib/test_yaml.py create mode 100644 _test/lib/test_yaml_ext.py create mode 100644 _test/roundtrip.py create mode 100644 _test/test_a_dedent.py create mode 100644 _test/test_add_xxx.py create mode 100644 _test/test_anchor.py create mode 100644 _test/test_api_change.py create mode 100644 _test/test_class_register.py create mode 100644 _test/test_collections.py create mode 100644 _test/test_comment_manipulation.py create mode 100644 _test/test_comments.py create mode 100644 _test/test_contextmanager.py create mode 100644 _test/test_copy.py create mode 100644 _test/test_cyaml.py create mode 100644 _test/test_datetime.py create mode 100644 _test/test_deprecation.py create mode 100644 _test/test_documents.py create mode 100644 _test/test_fail.py create mode 100644 _test/test_float.py create mode 100644 _test/test_flowsequencekey.py create mode 100644 _test/test_indentation.py create mode 100644 _test/test_int.py create mode 100644 _test/test_issues.py create mode 100644 _test/test_json_numbers.py create mode 100644 _test/test_line_col.py create mode 100644 _test/test_literal.py create mode 100644 _test/test_none.py create mode 100644 _test/test_numpy.py create mode 100644 _test/test_program_config.py create mode 100644 _test/test_spec_examples.py create mode 100644 _test/test_string.py 
create mode 100644 _test/test_tag.py create mode 100644 _test/test_version.py create mode 100644 _test/test_yamlfile.py create mode 100644 _test/test_yamlobject.py create mode 100644 _test/test_z_check_debug_leftovers.py create mode 100644 _test/test_z_data.py create mode 100644 _test/test_z_olddata.py create mode 100644 lib/ruyaml/__init__.py create mode 100644 lib/ruyaml/anchor.py create mode 100644 lib/ruyaml/comments.py create mode 100644 lib/ruyaml/compat.py create mode 100644 lib/ruyaml/composer.py create mode 100644 lib/ruyaml/configobjwalker.py create mode 100644 lib/ruyaml/constructor.py create mode 100644 lib/ruyaml/cyaml.py create mode 100644 lib/ruyaml/dumper.py create mode 100644 lib/ruyaml/emitter.py create mode 100644 lib/ruyaml/error.py create mode 100644 lib/ruyaml/events.py create mode 100644 lib/ruyaml/loader.py create mode 100644 lib/ruyaml/main.py create mode 100644 lib/ruyaml/nodes.py create mode 100644 lib/ruyaml/parser.py create mode 100644 lib/ruyaml/py.typed create mode 100644 lib/ruyaml/reader.py create mode 100644 lib/ruyaml/representer.py create mode 100644 lib/ruyaml/resolver.py create mode 100644 lib/ruyaml/scalarbool.py create mode 100644 lib/ruyaml/scalarfloat.py create mode 100644 lib/ruyaml/scalarint.py create mode 100644 lib/ruyaml/scalarstring.py create mode 100644 lib/ruyaml/scanner.py create mode 100644 lib/ruyaml/serializer.py create mode 100644 lib/ruyaml/timestamp.py create mode 100644 lib/ruyaml/tokens.py create mode 100644 lib/ruyaml/util.py create mode 100644 pyproject.toml create mode 100644 setup.cfg create mode 100644 tox.ini diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000..20bc262 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,2 @@ +.github/ @ssbarnea +* @ssbarnea @smurfix @gdubicki diff --git a/.github/release-drafter.yml b/.github/release-drafter.yml new file mode 100644 index 0000000..114b5fc --- /dev/null +++ b/.github/release-drafter.yml @@ -0,0 +1,2 @@ +# see 
https://github.com/ansible-community/devtools +_extends: ansible-community/devtools diff --git a/.github/workflows/ack.yml b/.github/workflows/ack.yml new file mode 100644 index 0000000..5880add --- /dev/null +++ b/.github/workflows/ack.yml @@ -0,0 +1,9 @@ +# See https://github.com/ansible-community/devtools/blob/main/.github/workflows/ack.yml +name: ack +on: + pull_request_target: + types: [opened, labeled, unlabeled, synchronize] + +jobs: + ack: + uses: ansible-community/devtools/.github/workflows/ack.yml@main diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml new file mode 100644 index 0000000..e8239f7 --- /dev/null +++ b/.github/workflows/push.yml @@ -0,0 +1,12 @@ +# See https://github.com/ansible-community/devtools/blob/main/.github/workflows/push.yml +name: push +on: + push: + branches: + - main + - 'releases/**' + - 'stable/**' + +jobs: + ack: + uses: ansible-community/devtools/.github/workflows/push.yml@main diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..d63d5b6 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,48 @@ +name: release + +on: + release: + types: [published] + +jobs: + pypi: + name: Publish to PyPI registry + environment: release + runs-on: ubuntu-20.04 + + env: + FORCE_COLOR: 1 + PY_COLORS: 1 + TOXENV: packaging + TOX_PARALLEL_NO_SPINNER: 1 + + steps: + - name: Switch to using Python 3.8 by default + uses: actions/setup-python@v2 + with: + python-version: 3.8 + - name: Install tox + run: >- + python3 -m + pip install + --user + tox + - name: Check out src from Git + uses: actions/checkout@v2 + with: + fetch-depth: 0 # needed by setuptools-scm + - name: Build dists + run: python -m tox + - name: Publish to test.pypi.org + if: >- # "create" workflows run separately from "push" & "pull_request" + github.event_name == 'release' + uses: pypa/gh-action-pypi-publish@master + with: + password: ${{ secrets.testpypi_password }} + repository_url: 
https://test.pypi.org/legacy/ + - name: Publish to pypi.org + if: >- # "create" workflows run separately from "push" & "pull_request" + github.event_name == 'release' + uses: pypa/gh-action-pypi-publish@master + with: + password: ${{ secrets.pypi_password }} diff --git a/.github/workflows/tox.yml b/.github/workflows/tox.yml new file mode 100644 index 0000000..683ad46 --- /dev/null +++ b/.github/workflows/tox.yml @@ -0,0 +1,76 @@ +name: gh + +on: + pull_request: +jobs: + gh: + name: ${{ matrix.name }} + runs-on: ubuntu-20.04 + strategy: + fail-fast: false + matrix: + include: + - name: linters + python-version: 3.6 + # - name: docs + # python-version: 3.6 + # continue-on-error: true + - name: packaging + python-version: 3.6 + - name: py36 + python-version: 3.6 + - name: py37 + python-version: 3.7 + - name: py38 + python-version: 3.8 + - name: py39 + python-version: 3.9 + - name: py310 + python-version: "3.10" + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 0 # needed by setuptools-scm + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: >- + Log the currently selected Python + version info (${{ matrix.python-version }}) + run: | + python --version --version + which python + - name: Pip cache + uses: actions/cache@v2 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pip-${{ env.PY_SHA256 }}-${{ hashFiles('setup.cfg', 'tox.ini', 'pyproject.toml', '.pre-commit-config.yaml', 'pytest.ini') }} + restore-keys: | + ${{ runner.os }}-pip- + ${{ runner.os }}- + - name: Install tox + run: | + python3 -m pip install --upgrade tox + - name: Log installed dists + run: >- + python3 -m pip freeze --all + - name: "Test with tox" + run: | + python3 -m tox + env: + TOXENV: ${{ matrix.name }} + - name: Archive logs + uses: actions/upload-artifact@v2 + with: + name: logs.zip + path: .tox/**/log/ + check: + needs: + - gh + runs-on: ubuntu-latest + steps: + - name: Report 
success of the test matrix + run: >- + print("All's good") + shell: python diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..e2bbb9d --- /dev/null +++ b/.gitignore @@ -0,0 +1,9 @@ +/.tox/ +/build/ +/dist/ +/.eggs/ +/.pybuild/ +*.egg-info/ + +__pycache__ +/_doc/_build/ diff --git a/.hgignore b/.hgignore new file mode 100644 index 0000000..4c733f2 --- /dev/null +++ b/.hgignore @@ -0,0 +1,16 @@ +# this should only include project specific files. Ignores that are valid for other +# ruamel. projects like e.g. the directory .tox should go in the file pointed to by +# the ui->ignore entry in ~/.hgrc (mercurial doesn't conform to the XDG Base Directory +# Specification): +# [ui] +# ignore = ~/.hgext/hgignore + +syntax: glob + +# _yaml.so +venv +TODO.rst +try_* +_doc/*.pdf +_doc/*.rst +*.py_alt diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..fce2997 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,48 @@ +--- +exclude: | + (?x)( + ^docs/conf.py$| + ^_test/data/.*$ + ) +repos: + - repo: https://github.com/PyCQA/isort + rev: 5.10.1 + hooks: + - id: isort + - repo: https://github.com/python/black.git + rev: 21.12b0 + hooks: + - id: black + language_version: python3 + - repo: https://github.com/pre-commit/pre-commit-hooks.git + rev: v4.0.1 + hooks: + - id: end-of-file-fixer + - id: trailing-whitespace + - id: mixed-line-ending + - id: check-byte-order-marker + - id: check-executables-have-shebangs + - id: check-merge-conflict + - id: debug-statements + language_version: python3 + - repo: https://gitlab.com/pycqa/flake8.git + rev: 3.9.2 + hooks: + - id: flake8 + additional_dependencies: + - pydocstyle>=5.1.1 + # - flake8-black>=0.1.1 + - flake8-bugbear>=20.11.1 + language_version: python3 + - repo: https://github.com/pre-commit/mirrors-mypy + rev: v0.910-1 + hooks: + - id: mypy + # empty args needed in order to match mypy cli behavior + args: ['--allow-redefinition'] + entry: mypy lib/ + 
pass_filenames: false + additional_dependencies: + - packaging + - rich + - subprocess-tee>=0.1.4 diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 0000000..3180eb0 --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,13 @@ +version: 2 + +sphinx: + configuration: _doc/conf.py + +formats: [epub, pdf] + +python: + version: 3.7 + install: + - method: pip + path: . + extra_requirements: [docs] diff --git a/CHANGES b/CHANGES new file mode 100644 index 0000000..8a3c5d2 --- /dev/null +++ b/CHANGES @@ -0,0 +1,1085 @@ +[0, 90, 0]: 2020-10-27 + - UNRELEASED + - Renaming the project to "ruyaml". + - Removed remains of Python 2 compatiblity + - Removed documentation's dependency on "ryd" tool + +[0, 17, 17]: 2021-10-31 + - extract timestamp matching/creation to util + +[0, 17, 16]: 2021-08-28 + - also handle issue 397 when comment is newline + +[0, 17, 15]: 2021-08-28 + - fix issue 397, insert comment before key when a comment between key and value exists + (reported by `Bastien gerard `__) + +[0, 17, 14]: 2021-08-25 + - fix issue 396, inserting key/val in merged-in dictionary (reported by `Bastien gerard + `__) + +[0, 17, 13]: 2021-08-21 + - minor fix in attr handling + +[0, 17, 12]: 2021-08-21 + - fix issue with anchor on registered class not preserved and those classes using package + attrs with `@attr.s()` (both reported by `ssph `__) + +[0, 17, 11]: 2021-08-19 + - fix error baseclass for ``DuplicateKeyErorr`` (reported by `Åukasz Rogalski + `__) + - fix typo in reader error message, causing `KeyError` during reader error + (reported by `MTU `__) + +[0, 17, 10]: 2021-06-24 + - fix issue 388, token with old comment structure != two elements + (reported by `Dimitrios Bariamis `__) + +[0, 17, 9]: 2021-06-10 + - fix issue with updating CommentedMap (reported by sri on + `StackOverlow `__) + +[0, 17, 8]: 2021-06-09 + - fix for issue 387 where templated anchors on tagged object did get set + resulting in potential id reuse. 
(reported by `Artem Ploujnikov + `__) + +[0, 17, 7]: 2021-05-31 + - issue 385 also affected other deprecated loaders (reported via email + by Oren Watson) + +[0, 17, 6]: 2021-05-31 + - merged type annotations update provided by + `Jochen Sprickerhof `__ + - fix for issue 385: deprecated round_trip_loader function not working + (reported by `Mike Gouline `__) + - wasted a few hours getting rid of mypy warnings/errors + +[0, 17, 5]: 2021-05-30 + - fix for issue 384 !!set with aliased entry resulting in broken YAML on rt + reported by `William Kimball `__) + +[0, 17, 4]: 2021-04-07 + - prevent (empty) comments from throwing assertion error (issue 351 + reported by `William Kimball `__) + comments (or empty line) will be dropped + +[0, 17, 3]: 2021-04-07 + - fix for issue 382 caused by an error in a format string (reported by + `William Kimball `__) + - allow expansion of aliases by setting ``yaml.composer.return_alias = lambda s: copy.deepcopy(s)`` + (as per `Stackoverflow answer `__) + +[0, 17, 2]: 2021-03-29 + - change -py2.py3-none-any.whl to -py3-none-any.whl, and remove 0.17.1 + +[0, 17, 1]: 2021-03-29 + - added 'Programming Language :: Python :: 3 :: Only', and removing + 0.17.0 from PyPI (reported by `Alasdair Nicol `__) + +[0, 17, 0]: 2021-03-26 + - this release no longer supports Python 2.7, most if not all Python 2 + specific code is removed. The 0.17.x series is the last to support Python 3.5 + (this also allowed for removal of the dependency on ``ruamel.std.pathlib``) + - remove Python2 specific code branches and adaptations (u-strings) + - prepare % code for f-strings using ``_F`` + - allow PyOxidisation (`issue 324 `__ + resp. 
`issue 171 `__) + - replaced Python 2 compatible enforcement of keyword arguments with '*' + - the old top level *functions* ``load``, ``safe_load``, ``round_trip_load``, + ``dump``, ``safe_dump``, ``round_trip_dump``, ``scan``, ``parse``, + ``compose``, ``emit``, ``serialize`` as well as their ``_all`` variants for + multi-document streams, now issue a ``PendingDeprecationning`` (e.g. when run + from pytest, but also Python is started with ``-Wd``). Use the methods on + ``YAML()``, which have been extended. + - fix for issue 376: indentation changes could put literal/folded scalar to start + before the ``#`` column of a following comment. Effectively making the comment + part of the scalar in the output. (reported by + `Bence Nagy `__) + + +[0, 16, 13]: 2021-03-05 + - fix for issue 359: could not update() CommentedMap with keyword arguments + (reported by `Steve Franchak `__) + - fix for issue 365: unable to dump mutated TimeStamp objects + (reported by Anton Akmerov `__) + - fix for issue 371: unable to addd comment without starting space + (reported by 'Mark Grandi `__) + - fix for issue 373: recursive call to walk_tree not preserving all params + (reported by `eulores `__) + - a None value in a flow-style sequence is now dumped as `null` instead + of `!!null ''` (reported by mcarans on + `StackOverlow `__) + +[0, 16, 12]: 2020-09-04 + - update links in doc + +[0, 16, 11]: 2020-09-03 + - workaround issue with setuptools 0.50 and importing pip ( fix by jaraco + https://github.com/pypa/setuptools/issues/2355#issuecomment-685159580 ) + +[0, 16, 10]: 2020-02-12 + - (auto) updated image references in README to sourceforge + +[0, 16, 9]: 2020-02-11 + - update CHANGES + +[0, 16, 8]: 2020-02-11 + - update requirements so that ruamel.yaml.clib is installed for 3.8, + as it has become available (via manylinux builds) + +[0, 16, 7]: 2020-01-30 + - fix typchecking issue on TaggedScalar (reported by Jens Nielsen) + - fix error in dumping literal scalar in sequence with 
comments before element + (reported by `EJ Etherington `__) + +[0, 16, 6]: 2020-01-20 + - fix empty string mapping key roundtripping with preservation of quotes as `? ''` + (reported via email by Tomer Aharoni). + - fix incorrect state setting in class constructor (reported by `Douglas Raillard + `__) + - adjust deprecation warning test for Hashable, as that no longer warns (reported + by `Jason Montleon `__) + +[0, 16, 5]: 2019-08-18 + - allow for ``YAML(typ=['unsafe', 'pytypes'])`` + +[0, 16, 4]: 2019-08-16 + - fix output of TAG directives with # (reported by `Thomas Smith + `__) + + +[0, 16, 3]: 2019-08-15 + - move setting of version based on YAML directive to scanner, allowing to + check for file version during TAG directive scanning + +[0, 16, 2]: 2019-08-15 + - preserve YAML and TAG directives on roundtrip, correctly output # + in URL for YAML 1.2 (both reported by `Thomas Smith + `__) + +[0, 16, 1]: 2019-08-08 + - Force the use of new version of ruamel.yaml.clib (reported by `Alex Joz + `__) + - Allow '#' in tag URI as these are allowed in YAML 1.2 (reported by + `Thomas Smith + `__) + +[0, 16, 0]: 2019-07-25 + - split of C source that generates .so file to ruamel.yaml.clib + - duplicate keys are now an error when working with the old API as well + +[0, 15, 100]: 2019-07-17 + - fixing issue with dumping deep-copied data from commented YAML, by + providing both the memo parameter to __deepcopy__, and by allowing + startmarks to be compared on their content (reported by `Theofilos + Petsios + `__) + +[0, 15, 99]: 2019-07-12 + - add `py.typed` to distribution, based on a PR submitted by + `Michael Crusoe + `__ + - merge PR 40 (also by Michael Crusoe) to more accurately specify + repository in the README (also reported in a misunderstood issue + some time ago) + +[0, 15, 98]: 2019-07-09 + - regenerate ext/_ruamel_yaml.c with Cython version 0.29.12, needed + for Python 3.8.0b2 (reported by `John Vandenberg + `__) + +[0, 15, 97]: 2019-06-06 + - regenerate 
ext/_ruamel_yaml.c with Cython version 0.29.10, needed for + Python 3.8.0b1 + - regenerate ext/_ruamel_yaml.c with Cython version 0.29.9, needed for + Python 3.8.0a4 (reported by `Anthony Sottile + `__) + +[0, 15, 96]: 2019-05-16 + - fix failure to indent comments on round-trip anchored block style + scalars in block sequence (reported by `William Kimball + `__) + +[0, 15, 95]: 2019-05-16 + - fix failure to round-trip anchored scalars in block sequence + (reported by `William Kimball + `__) + - wheel files for Python 3.4 no longer provided (`Python 3.4 EOL 2019-03-18 + `__) + +[0, 15, 94]: 2019-04-23 + - fix missing line-break after end-of-file comments not ending in + line-break (reported by `Philip Thompson + `__) + +[0, 15, 93]: 2019-04-21 + - fix failure to parse empty implicit flow mapping key + - in YAML 1.1 plains scalars `y`, 'n', `Y`, and 'N' are now + correctly recognised as booleans and such strings dumped quoted + (reported by `Marcel Bollmann + `__) + +[0, 15, 92]: 2019-04-16 + - fix failure to parse empty implicit block mapping key (reported by + `Nolan W `__) + +[0, 15, 91]: 2019-04-05 + - allowing duplicate keys would not work for merge keys (reported by mamacdon on + `StackOverflow `__ + +[0, 15, 90]: 2019-04-04 + - fix issue with updating `CommentedMap` from list of tuples (reported by + `Peter Henry `__) + +[0, 15, 90]: 2019-04-04 + - fix issue with updating `CommentedMap` from list of tuples (reported by + `Peter Henry `__) + +[0, 15, 90]: 2019-04-04 + - fix issue with updating `CommentedMap` from list of tuples (reported by + `Peter Henry `__) + +[0, 15, 89]: 2019-02-27 + - fix for items with flow-mapping in block sequence output on single line + (reported by `Zahari Dim `__) + - fix for safe dumping erroring in creation of representereror when dumping namedtuple + (reported and solution by `Jaakko Kantojärvi `__) + +[0, 15, 88]: 2019-02-12 + - fix inclusing of python code from the subpackage data (containing extra tests, + reported by `Florian 
Apolloner `__) + +[0, 15, 87]: 2019-01-22 + - fix problem with empty lists and the code to reinsert merge keys (reported via email + by Zaloo) + +[0, 15, 86]: 2019-01-16 + - reinsert merge key in its old position (reported by grumbler on + `__) + - fix for issue with non-ASCII anchor names (reported and fix + provided by Dandaleon Flux via email) + - fix for issue when parsing flow mapping value starting with colon (in pure Python only) + (reported by `FichteFoll `__) + +[0, 15, 85]: 2019-01-08 + - the types used by `SafeConstructor` for mappings and sequences can + now by set by assigning to `XXXConstructor.yaml_base_dict_type` + (and `..._list_type`), preventing the need to copy two methods + with 50+ lines that had `var = {}` hardcoded. (Implemented to + help solve an feature request by `Anthony Sottile + `__ in an easier way) + +[0, 15, 84]: 2019-01-07 + - fix for `CommentedMap.copy()` not returning `CommentedMap`, let alone copying comments etc. + (reported by `Anthony Sottile `__) + +[0, 15, 83]: 2019-01-02 + - fix for bug in roundtripping aliases used as key (reported via email by Zaloo) + +[0, 15, 82]: 2018-12-28 + - anchors and aliases on scalar int, float, string and bool are now preserved. 
Anchors + do not need a referring alias for these (reported by + `Alex Harvey `__) + - anchors no longer lost on tagged objects when roundtripping (reported by `Zaloo + `__) + +[0, 15, 81]: 2018-12-06 + - fix issue saving methods of metaclass derived classes (reported and fix provided + by `Douglas Raillard `__) + +[0, 15, 80]: 2018-11-26 + - fix issue emitting BEL character when round-tripping invalid folded input + (reported by Isaac on `StackOverflow `__) + +[0, 15, 79]: 2018-11-21 + - fix issue with anchors nested deeper than alias (reported by gaFF on + `StackOverflow `__) + +[0, 15, 78]: 2018-11-15 + - fix setup issue for 3.8 (reported by `Sidney Kuyateh + `__) + +[0, 15, 77]: 2018-11-09 + - setting `yaml.sort_base_mapping_type_on_output = False`, will prevent + explicit sorting by keys in the base representer of mappings. Roundtrip + already did not do this. Usage only makes real sense for Python 3.6+ + (feature request by `Sebastian Gerber `__). + - implement Python version check in YAML metadata in ``_test/test_z_data.py`` + +[0, 15, 76]: 2018-11-01 + - fix issue with empty mapping and sequence loaded as flow-style + (mapping reported by `Min RK `__, sequence + by `Maged Ahmed `__) + +[0, 15, 75]: 2018-10-27 + - fix issue with single '?' scalar (reported by `Terrance + `__) + - fix issue with duplicate merge keys (prompted by `answering + `__ a + `StackOverflow question `__ + by `math `__) + +[0, 15, 74]: 2018-10-17 + - fix dropping of comment on rt before sequence item that is sequence item + (reported by `Thorsten Kampe `__) + +[0, 15, 73]: 2018-10-16 + - fix irregular output on pre-comment in sequence within sequence (reported + by `Thorsten Kampe `__) + - allow non-compact (i.e. next line) dumping sequence/mapping within sequence. 
+ +[0, 15, 72]: 2018-10-06 + - fix regression on explicit 1.1 loading with the C based scanner/parser + (reported by `Tomas Vavra `__) + +[0, 15, 71]: 2018-09-26 + - fix regression where handcrafted CommentedMaps could not be initiated (reported by + `Dan Helfman `__) + - fix regression with non-root literal scalars that needed indent indicator + (reported by `Clark Breyman `__) + - tag:yaml.org,2002:python/object/apply now also uses __qualname__ on PY3 + (reported by `Douglas RAILLARD `__) + +[0, 15, 70]: 2018-09-21 + - reverted CommentedMap and CommentedSeq to subclass ordereddict resp. list, + reimplemented merge maps so that both ``dict(**commented_map_instance)`` and JSON + dumping works. This also allows checking with ``isinstance()`` on ``dict`` resp. ``list``. + (Proposed by `Stuart Berg `__, with feedback + from `blhsing `__ on + `StackOverflow `__) + +[0, 15, 69]: 2018-09-20 + - fix issue with dump_all gobbling end-of-document comments on parsing + (reported by `Pierre B. `__) + +[0, 15, 68]: 2018-09-20 + - fix issue with parsabel, but incorrect output with nested flow-style sequences + (reported by `Dougal Seeley `__) + - fix issue with loading Python objects that have __setstate__ and recursion in parameters + (reported by `Douglas RAILLARD `__) + +[0, 15, 67]: 2018-09-19 + - fix issue with extra space inserted with non-root literal strings + (Issue reported and PR with fix provided by + `Naomi Seyfer `__.) + +[0, 15, 66]: 2018-09-07 + - fix issue with fold indicating characters inserted in safe_load-ed folded strings + (reported by `Maximilian Hils `__). + +[0, 15, 65]: 2018-09-07 + - fix issue #232 revert to throw ParserError for unexcpected ``]`` + and ``}`` instead of IndexError. (Issue reported and PR with fix + provided by `Naomi Seyfer `__.) 
+ - added ``key`` and ``reverse`` parameter (suggested by Jannik Klemm via email) + - indent root level literal scalars that have directive or document end markers + at the beginning of a line + +[0, 15, 64]: 2018-08-30 + - support round-trip of tagged sequences: ``!Arg [a, {b: 1}]`` + - single entry mappings in flow sequences now written by default without quotes + set ``yaml.brace_single_entry_mapping_in_flow_sequence=True`` to force + getting ``[a, {b: 1}, {c: {d: 2}}]`` instead of the default ``[a, b: 1, c: {d: 2}]`` + - fix issue when roundtripping floats starting with a dot such as ``.5`` + (reported by `Harrison Gregg `__) + +[0, 15, 63]: 2018-08-29 + - small fix only necessary for Windows users that don't use wheels. + +[0, 15, 62]: 2018-08-29 + - C based reader/scanner & emitter now allow setting of 1.2 as YAML version. + ** The loading/dumping is still YAML 1.1 code**, so use the common subset of + YAML 1.2 and 1.1 (reported by `Ge Yang `__) + +[0, 15, 61]: 2018-08-23 + - support for round-tripping folded style scalars (initially requested + by `Johnathan Viduchinsky `__) + - update of C code + - speed up of scanning (~30% depending on the input) + +[0, 15, 60]: 2018-08-18 + - cleanup for mypy + - spurious print in library (reported by + `Lele Gaifax `__), now automatically checked + +[0, 15, 59]: 2018-08-17 + - issue with C based loader and leading zeros (reported by + `Tom Hamilton Stubber `__) + +[0, 15, 58]: 2018-08-17 + - simple mappings can now be used as keys when round-tripping:: + + {a: 1, b: 2}: hello world + + although using the obvious operations (del, popitem) on the key will + fail, you can mutilate it by going through its attributes. 
If you load the + above YAML in `d`, then changing the value is cumbersome: + + d = {CommentedKeyMap([('a', 1), ('b', 2)]): "goodbye"} + + and changing the key even more so: + + d[CommentedKeyMap([('b', 1), ('a', 2)])] = d.pop( + CommentedKeyMap([('a', 1), ('b', 2)])) + + (you can use a `dict` instead of a list of tuples (or ordereddict), but that might result + in a different order, of the keys of the key, in the output) + - check integers to dump with 1.2 patterns instead of 1.1 (reported by + `Lele Gaifax `__) + + +[0, 15, 57]: 2018-08-15 + - Fix that CommentedSeq could no longer be used in adding or do a copy + (reported by `Christopher Wright `__) + +[0, 15, 56]: 2018-08-15 + - fix issue with ``python -O`` optimizing away code (reported, and detailed cause + pinpointed, by `Alex Grönholm `__ + +[0, 15, 55]: 2018-08-14 + + - unmade ``CommentedSeq`` a subclass of ``list``. It is now + indirectly a subclass of the standard + ``collections.abc.MutableSequence`` (without .abc if you are + still on Python2.7). If you do ``isinstance(yaml.load('[1, 2]'), + list)``) anywhere in your code replace ``list`` with + ``MutableSequence``. Directly, ``CommentedSeq`` is a subclass of + the abstract baseclass ``ruamel.yaml.compat.MutableScliceableSequence``, + with the result that *(extended) slicing is supported on + ``CommentedSeq``*. + (reported by `Stuart Berg `__) + - duplicate keys (or their values) with non-ascii now correctly + report in Python2, instead of raising a Unicode error. 
+ (Reported by `Jonathan Pyle `__) + +[0, 15, 54]: 2018-08-13 + + - fix issue where a comment could pop-up twice in the output (reported by + `Mike Kazantsev `__ and by + `Nate Peterson `__) + - fix issue where JSON object (mapping) without spaces was not parsed + properly (reported by `Marc Schmidt `__) + - fix issue where comments after empty flow-style mappings were not emitted + (reported by `Qinfench Chen `__) + +[0, 15, 53]: 2018-08-12 + - fix issue with flow style mapping with comments gobbled newline (reported + by `Christopher Lambert `__) + - fix issue where single '+' under YAML 1.2 was interpreted as + integer, erroring out (reported by `Jethro Yu + `__) + +[0, 15, 52]: 2018-08-09 + - added `.copy()` mapping representation for round-tripping + (``CommentedMap``) to fix incomplete copies of merged mappings + (reported by `Will Richards + `__) + - Also unmade that class a subclass of ordereddict to solve incorrect behaviour + for ``{**merged-mapping}`` and ``dict(**merged-mapping)`` (reported by + `Filip Matzner `__) + +[0, 15, 51]: 2018-08-08 + - Fix method name dumps (were not dotted) and loads (reported by `Douglas Raillard + `__) + - Fix spurious trailing white-space caused when the comment start + column was no longer reached and there was no actual EOL comment + (e.g. following empty line) and doing substitutions, or when + quotes around scalars got dropped. (reported by `Thomas Guillet + `__) + +[0, 15, 50]: 2018-08-05 + - Allow ``YAML()`` as a context manager for output, thereby making it much easier + to generate multi-documents in a stream. + - Fix issue with incorrect type information for `load()` and `dump()` (reported + by `Jimbo Jim `__) + +[0, 15, 49]: 2018-08-05 + - fix preservation of leading newlines in root level literal style scalar, + and preserve comment after literal style indicator (``| # some comment``) + Both needed for round-tripping multi-doc streams in + `ryd `__. 
+ +[0, 15, 48]: 2018-08-03 + - housekeeping: ``oitnb`` for formatting, mypy 0.620 upgrade and conformity + +[0, 15, 47]: 2018-07-31 + - fix broken 3.6 manylinux1 (result of an unclean ``build`` (reported by + `Roman Sichnyi `__) + + +[0, 15, 46]: 2018-07-29 + - fixed DeprecationWarning for importing from ``collections`` on 3.7 + (issue 210, reported by `Reinoud Elhorst + `__). It was `difficult to find + why tox/pytest did not report + `__ and as time + consuming to actually `fix + `__ the tests. + +[0, 15, 45]: 2018-07-26 + - After adding failing test for ``YAML.load_all(Path())``, remove StopIteration + (PR provided by `Zachary Buhman `__, + also reported by `Steven Hiscocks `__. + +[0, 15, 44]: 2018-07-14 + - Correct loading plain scalars consisting of numerals only and + starting with `0`, when not explicitly specifying YAML version + 1.1. This also fixes the issue about dumping string `'019'` as + plain scalars as reported by `Min RK + `__, that prompted this chance. + +[0, 15, 43]: 2018-07-12 + - merge PR33: Python2.7 on Windows is narrow, but has no + ``sysconfig.get_config_var('Py_UNICODE_SIZE')``. (merge provided by + `Marcel Bargull `__) + - ``register_class()`` now returns class (proposed by + `Mike Nerone `__} + +[0, 15, 42]: 2018-07-01 + - fix regression showing only on narrow Python 2.7 (py27mu) builds + (with help from + `Marcel Bargull `__ and + `Colm O'Connor <>`__). + - run pre-commit ``tox`` on Python 2.7 wide and narrow, as well as + 3.4/3.5/3.6/3.7/pypy + +[0, 15, 41]: 2018-06-27 + - add detection of C-compile failure (investigation prompted by + `StackOverlow `__ by + `Emmanuel Blot `__), + which was removed while no longer dependent on ``libyaml``, C-extensions + compilation still needs a compiler though. 
+ +[0, 15, 40]: 2018-06-18 + - added links to landing places as suggested in issue 190 by + `KostisA `__ + - fixes issue #201: decoding unicode escaped tags on Python2, reported + by `Dan Abolafia `__ + +[0, 15, 39]: 2018-06-16 + - merge PR27 improving package startup time (and loading when regexp not + actually used), provided by + `Marcel Bargull `__ + +[0, 15, 38]: 2018-06-13 + - fix for losing precision when roundtripping floats by + `Rolf Wojtech `__ + - fix for hardcoded dir separator not working for Windows by + `Nuno André `__ + - typo fix by `Andrey Somov `__ + +[0, 15, 37]: 2018-03-21 + - again trying to create installable files for 187 + +[0, 15, 36]: 2018-02-07 + - fix issue 187, incompatibility of C extension with 3.7 (reported by + Daniel Blanchard) + +[0, 15, 35]: 2017-12-03 + - allow ``None`` as stream when specifying ``transform`` parameters to + ``YAML.dump()``. + This is useful if the transforming function doesn't return a meaningful value + (inspired by `StackOverflow `__ by + `rsaw `__). + +[0, 15, 34]: 2017-09-17 + - fix for issue 157: CDumper not dumping floats (reported by Jan Smitka) + +[0, 15, 33]: 2017-08-31 + - support for "undefined" round-tripping tagged scalar objects (in addition to + tagged mapping object). Inspired by a use case presented by Matthew Patton + on `StackOverflow `__. + - fix issue 148: replace cryptic error message when using !!timestamp with an + incorrectly formatted or non- scalar. Reported by FichteFoll. + +[0, 15, 32]: 2017-08-21 + - allow setting ``yaml.default_flow_style = None`` (default: ``False``) for + for ``typ='rt'``. 
+ - fix for issue 149: multiplications on ``ScalarFloat`` now return ``float`` + +[0, 15, 31]: 2017-08-15 + - fix Comment dumping + +[0, 15, 30]: 2017-08-14 + - fix for issue with "compact JSON" not parsing: ``{"in":{},"out":{}}`` + (reported on `StackOverflow `_ by + `mjalkio `_ + +[0, 15, 29]: 2017-08-14 + - fix issue #51: different indents for mappings and sequences (reported by + Alex Harvey) + - fix for flow sequence/mapping as element/value of block sequence with + sequence-indent minus dash-offset not equal two. + +[0, 15, 28]: 2017-08-13 + - fix issue #61: merge of merge cannot be __repr__-ed (reported by Tal Liron) + +[0, 15, 27]: 2017-08-13 + - fix issue 62, YAML 1.2 allows ``?`` and ``:`` in plain scalars if non-ambigious + (reported by nowox) + - fix lists within lists which would make comments disappear + +[0, 15, 26]: 2017-08-10 + - fix for disappearing comment after empty flow sequence (reported by + oit-tzhimmash) + +[0, 15, 25]: 2017-08-09 + - fix for problem with dumping (unloaded) floats (reported by eyenseo) + +[0, 15, 24]: 2017-08-09 + - added ScalarFloat which supports roundtripping of 23.1, 23.100, + 42.00E+56, 0.0, -0.0 etc. while keeping the format. Underscores in mantissas + are not preserved/supported (yet, is anybody using that?). + - (finally) fixed longstanding issue 23 (reported by `Antony Sottile + `_), now handling comment between block + mapping key and value correctly + - warn on YAML 1.1 float input that is incorrect (triggered by invalid YAML + provided by Cecil Curry) + - allow setting of boolean representation (`false`, `true`) by using: + ``yaml.boolean_representation = [u'False', u'True']`` + +[0, 15, 23]: 2017-08-01 + - fix for round_tripping integers on 2.7.X > sys.maxint (reported by ccatterina) + +[0, 15, 22]: 2017-07-28 + - fix for round_tripping singe excl. 
mark tags doubling (reported and fix by Jan Brezina) + +[0, 15, 21]: 2017-07-25 + - fix for writing unicode in new API, https://stackoverflow.com/a/45281922/1307905 + +[0, 15, 20]: 2017-07-23 + - wheels for windows including C extensions + +[0, 15, 19]: 2017-07-13 + - added object constructor for rt, decorator ``yaml_object`` to replace YAMLObject. + - fix for problem using load_all with Path() instance + - fix for load_all in combination with zero indent block style literal + (``pure=True`` only!) + +[0, 15, 18]: 2017-07-04 + - missing ``pure`` attribute on ``YAML`` useful for implementing `!include` tag + constructor for `including YAML files in a YAML file + `_ + - some documentation improvements + - trigger of doc build on new revision + +[0, 15, 17]: 2017-07-03 + - support for Unicode supplementary Plane **output** with allow_unicode + (input was already supported, triggered by + `this `_ Stack Overflow Q&A) + +[0, 15, 16]: 2017-07-01 + - minor typing issues (reported and fix provided by + `Manvendra Singh `_) + - small doc improvements + +[0, 15, 15]: 2017-06-27 + - fix for issue 135, typ='safe' not dumping in Python 2.7 + (reported by Andrzej Ostrowski `_) + +[0, 15, 14]: 2017-06-25 + - setup.py: change ModuleNotFoundError to ImportError (reported and fix by Asley Drake) + +[0, 15, 13]: 2017-06-24 + - suppress duplicate key warning on mappings with merge keys (reported by + Cameron Sweeney) + +[0, 15, 12]: 2017-06-24 + - remove fatal dependency of setup.py on wheel package (reported by + Cameron Sweeney) + +[0, 15, 11]: 2017-06-24 + - fix for issue 130, regression in nested merge keys (reported by + `David Fee `_) + +[0, 15, 10]: 2017-06-23 + - top level PreservedScalarString not indented if not explicitly asked to + - remove Makefile (not very useful anyway) + - some mypy additions + +[0, 15, 9]: 2017-06-16 + - fix for issue 127: tagged scalars were always quoted and seperated + by a newline when in a block sequence (reported and largely fixed by + `Tommy 
Wang `_) + +[0, 15, 8]: 2017-06-15 + - allow plug-in install via ``install ruamel.yaml[jinja2]`` + +[0, 15, 7]: 2017-06-14 + - add plug-in mechanism for load/dump pre resp. post-processing + +[0, 15, 6]: 2017-06-10 + - a set() with duplicate elements now throws error in rt loading + - support for toplevel column zero literal/folded scalar in explicit documents + +[0, 15, 5]: 2017-06-08 + - repeat `load()` on a single `YAML()` instance would fail. + +(0, 15, 4) 2017-06-08: | + - `transform` parameter on dump that expects a function taking a + string and returning a string. This allows transformation of the output + before it is written to stream. + - some updates to the docs + +(0, 15, 3) 2017-06-07: + - No longer try to compile C extensions on Windows. Compilation can be forced by setting + the environment variable `RUAMEL_FORCE_EXT_BUILD` to some value + before starting the `pip install`. + +(0, 15, 2) 2017-06-07: + - update to conform to mypy 0.511:mypy --strict + +(0, 15, 1) 2017-06-07: + - Any `duplicate keys `_ + in mappings generate an error (in the old API this change generates a warning until 0.16) + - dependecy on ruamel.ordereddict for 2.7 now via extras_require + +(0, 15, 0) 2017-06-04: + - it is now allowed to pass in a ``pathlib.Path`` as "stream" parameter to all + load/dump functions + - passing in a non-supported object (e.g. a string) as "stream" will result in a + much more meaningful YAMLStreamError. + - assigning a normal string value to an existing CommentedMap key or CommentedSeq + element will result in a value cast to the previous value's type if possible. 
+ +(0, 14, 12) 2017-05-14: + - fix for issue 119, deepcopy not returning subclasses (reported and PR by + Constantine Evans ) + +(0, 14, 11) 2017-05-01: + - fix for issue 103 allowing implicit documents after document end marker line (``...``) + in YAML 1.2 + +(0, 14, 10) 2017-04-26: + - fix problem with emitting using cyaml + +(0, 14, 9) 2017-04-22: + - remove dependency on ``typing`` while still supporting ``mypy`` + (http://stackoverflow.com/a/43516781/1307905) + - fix unclarity in doc that stated 2.6 is supported (reported by feetdust) + +(0, 14, 8) 2017-04-19: + - fix Text not available on 3.5.0 and 3.5.1, now proactively setting version guards + on all files (reported by `João Paulo Magalhães `_) + +(0, 14, 7) 2017-04-18: + - round trip of integers (decimal, octal, hex, binary) now preserve + leading zero(s) padding and underscores. Underscores are presumed + to be at regular distances (i.e. ``0o12_345_67`` dumps back as + ``0o1_23_45_67`` as the space from the last digit to the + underscore before that is the determining factor). + +(0, 14, 6) 2017-04-14: + - binary, octal and hex integers are now preserved by default. This + was a known deficiency. Working on this was prompted by the issue report (112) + from devnoname120, as well as the additional experience with `.replace()` + on `scalarstring` classes. + - fix issues 114 cannot install on Buildozer (reported by mixmastamyk). + Setting env. var ``RUAMEL_NO_PIP_INSTALL_CHECK`` will suppress ``pip``-check. 
+ +(0, 14, 5) 2017-04-04: + - fix issue 109 None not dumping correctly at top level (reported by Andrea Censi) + - fix issue 110 .replace on Preserved/DoubleQuoted/SingleQuoted ScalarString + would give back "normal" string (reported by sandres23) + +(0, 14, 4) 2017-03-31: + - fix readme + +(0, 14, 3) 2017-03-31: + - fix for 0o52 not being a string in YAML 1.1 (reported on + `StackOverflow Q&A 43138503>`_ by + `Frank D `_ + +(0, 14, 2) 2017-03-23: + - fix for old default pip on Ubuntu 14.04 (reported by Sébastien Maccagnoni-Munch) + +(0.14.1) 2017-03-22: + - fix Text not available on 3.5.0 and 3.5.1 (reported by Charles Bouchard-Légaré) + +(0.14.0) 2017-03-21: + - updates for mypy --strict + - preparation for moving away from inheritance in Loader and Dumper, calls from e.g. + the Representer to the Serializer.serialize() are now done via the attribute + .serializer.serialize(). Usage of .serialize() outside of Serializer will be + deprecated soon + - some extra tests on main.py functions + +(0.13.14) 2017-02-12: + - fix for issue 97, clipped block scalar followed by empty lines and comment + would result in two CommentTokens of which the first was dropped. + (reported by Colm O'Connor) + +(0.13.13) 2017-01-28: + - fix for issue 96, prevent insertion of extra empty line if indented mapping entries + are separated by an empty line (reported by Derrick Sawyer) + +(0.13.11) 2017-01-23: + - allow ':' in flow style scalars if not followed by space. Also don't + quote such scalar as this is no longer necessary. 
+ - add python 3.6 manylinux wheel to PyPI + +(0.13.10) 2017-01-22: + - fix for issue 93, insert spurious blank line before single line comment + between indented sequence elements (reported by Alex) + +(0.13.9) 2017-01-18: + - fix for issue 92, wrong import name reported by the-corinthian + +(0.13.8) 2017-01-18: + - fix for issue 91, when a compiler is unavailable reported by Maximilian Hils + - fix for deepcopy issue with TimeStamps not preserving 'T', reported on + `StackOverflow Q&A `_ by + `Quuxplusone `_ + +(0.13.7) 2016-12-27: + - fix for issue 85, constructor.py importing unicode_literals caused mypy to fail + on 2.7 (reported by Peter Amstutz) + +(0.13.6) 2016-12-27: + - fix for issue 83, collections.OrderedDict not representable by SafeRepresenter + (reported by Frazer McLean) + +(0.13.5) 2016-12-25: + - fix for issue 84, deepcopy not properly working (reported by Peter Amstutz) + +(0.13.4) 2016-12-05: + - another fix for issue 82, change to non-global resolver data broke implicit type + specification + +(0.13.3) 2016-12-05: + - fix for issue 82, deepcopy not working (reported by code monk) + +(0.13.2) 2016-11-28: + - fix for comments after empty (null) values (reported by dsw2127 and cokelaer) + +(0.13.1) 2016-11-22: + - optimisations on memory usage when loading YAML from large files (py3 -50%, py2 -85%) + +(0.13.0) 2016-11-20: + - if ``load()`` or ``load_all()`` is called with only a single argument + (stream or string) + a UnsafeLoaderWarning will be issued once. If appropriate you can surpress this + warning by filtering it. Explicitly supplying the ``Loader=ruamel.yaml.Loader`` + argument, will also prevent it from being issued. You should however consider + using ``safe_load()``, ``safe_load_all()`` if your YAML input does not use tags. 
+ - allow adding comments before and after keys (based on + `StackOverflow Q&A `_ by + `msinn `_) + +(0.12.18) 2016-11-16: + - another fix for numpy (re-reported independently by PaulG & Nathanial Burdic) + +(0.12.17) 2016-11-15: + - only the RoundTripLoader included the Resolver that supports YAML 1.2 + now all loaders do (reported by mixmastamyk) + +(0.12.16) 2016-11-13: + - allow dot char (and many others) in anchor name + Fix issue 72 (reported by Shalon Wood) + - | + Slightly smarter behaviour dumping strings when no style is + specified. Single string scalars that start with single quotes + or have newlines now are dumped double quoted "'abc\nklm'" instead of + + '''abc + + klm''' + +(0.12.14) 2016-09-21: + - preserve round-trip sequences that are mapping keys + (prompted by stackoverflow question 39595807 from Nowox) + +(0.12.13) 2016-09-15: + - Fix for issue #60 representation of CommentedMap with merge + keys incorrect (reported by Tal Liron) + +(0.12.11) 2016-09-06: + - Fix issue 58 endless loop in scanning tokens (reported by + Christopher Lambert) + +(0.12.10) 2016-09-05: + - Make previous fix depend on unicode char width (32 bit unicode support + is a problem on MacOS reported by David Tagatac) + +(0.12.8) 2016-09-05: + - To be ignored Unicode characters were not properly regex matched + (no specific tests, PR by Haraguroicha Hsu) + +(0.12.7) 2016-09-03: + - fixing issue 54 empty lines with spaces (reported by Alex Harvey) + +(0.12.6) 2016-09-03: + - fixing issue 46 empty lines between top-level keys were gobbled (but + not between sequence elements, nor between keys in nested mappings + (reported by Alex Harvey) + +(0.12.5) 2016-08-20: + - fixing issue 45 preserving datetime formatting (submitted by altuin) + Several formatting parameters are preserved with some normalisation: + - preserve 'T', 't' is replaced by 'T', multiple spaces between date + and time reduced to one. 
+ - optional space before timezone is removed + - still using microseconds, but now rounded (.1234567 -> .123457) + - Z/-5/+01:00 preserved + +(0.12.4) 2016-08-19: + - Fix for issue 44: missing preserve_quotes keyword argument (reported + by M. Crusoe) + +(0.12.3) 2016-08-17: + - correct 'in' operation for merged CommentedMaps in round-trip mode + (implementation inspired by J.Ngo, but original not working for merges) + - iteration over round-trip loaded mappings, that contain merges. Also + keys(), items(), values() (Py3/Py2) and iterkeys(), iteritems(), + itervalues(), viewkeys(), viewitems(), viewvalues() (Py2) + - reuse of anchor name now generates warning, not an error. Round-tripping such + anchors works correctly. This inherited PyYAML issue was brought to attention + by G. Coddut (and was long standing https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=515634) + suppressing the warning:: + + import warnings + from ruamel.yaml.error import ReusedAnchorWarning + warnings.simplefilter("ignore", ReusedAnchorWarning) + +(0.12.2) 2016-08-16: + - minor improvements based on feedback from M. Crusoe + https://bitbucket.org/ruamel/yaml/issues/42/ + +(0.12.0) 2016-08-16: + - drop support for Python 2.6 + - include initial Type information (inspired by M. 
Crusoe) + +(0.11.15) 2016-08-07: + - Change to prevent FutureWarning in NumPy, as reported by tgehring + ("comparison to None will result in an elementwise object comparison in the future") + +(0.11.14) 2016-07-06: + - fix preserve_quotes missing on original Loaders (as reported + by Leynos, bitbucket issue 38) + +(0.11.13) 2016-07-06: + - documentation only, automated linux wheels + +(0.11.12) 2016-07-06: + - added support for roundtrip of single/double quoted scalars using: + ruamel.yaml.round_trip_load(stream, preserve_quotes=True) + +(0.11.10) 2016-05-02: + +- added .insert(pos, key, value, comment=None) to CommentedMap + +(0.11.10) 2016-04-19: + +- indent=2, block_seq_indent=2 works as expected + +(0.11.0) 2016-02-18: + - RoundTripLoader loads 1.2 by default (no sexagesimals, 012 octals nor + yes/no/on/off booleans + +(0.10.11) 2015-09-17: +- Fix issue 13: dependency on libyaml to be installed for yaml.h + +(0.10.10) 2015-09-15: +- Python 3.5 tested with tox +- pypy full test (old PyYAML tests failed on too many open file handles) + +(0.10.6-0.10.9) 2015-09-14: +- Fix for issue 9 +- Fix for issue 11: double dump losing comments +- Include libyaml code +- move code from 'py' subdir for proper namespace packaging. + +(0.10.5) 2015-08-25: +- preservation of newlines after block scalars. Contributed by Sam Thursfield. + +(0.10) 2015-06-22: +- preservation of hand crafted anchor names ( not of the form "idNNN") +- preservation of map merges ( <<< ) + +(0.9) 2015-04-18: +- collections read in by the RoundTripLoader now have a ``lc`` property + that can be quired for line and column ( ``lc.line`` resp. 
``lc.col``) + +(0.8) 2015-04-15: +- bug fix for non-roundtrip save of ordereddict +- adding/replacing end of line comments on block style mappings/sequences + +(0.7.2) 2015-03-29: +- support for end-of-line comments on flow style sequences and mappings + +(0.7.1) 2015-03-27: +- RoundTrip capability of flow style sequences ( 'a: b, c, d' ) + +(0.7) 2015-03-26: +- tests (currently failing) for inline sequece and non-standard spacing between + block sequence dash and scalar (Anthony Sottile) +- initial possibility (on list, i.e. CommentedSeq) to set the flow format + explicitly +- RoundTrip capability of flow style sequences ( 'a: b, c, d' ) + +(0.6.1) 2015-03-15: +- setup.py changed so ruamel.ordereddict no longer is a dependency + if not on CPython 2.x (used to test only for 2.x, which breaks pypy 2.5.0 + reported by Anthony Sottile) + +(0.6) 2015-03-11: +- basic support for scalars with preserved newlines +- html option for yaml command +- check if yaml C library is available before trying to compile C extension +- include unreleased change in PyYAML dd 20141128 + +(0.5) 2015-01-14: +- move configobj -> YAML generator to own module +- added dependency on ruamel.base (based on feedback from Sess + + +(0.4) 20141125: +- move comment classes in own module comments +- fix omap pre comment +- make !!omap and !!set take parameters. 
There are still some restrictions: + - no comments before the !!tag +- extra tests + +(0.3) 20141124: +- fix value comment occurring as on previous line (looking like eol comment) +- INI conversion in yaml + tests +- (hidden) test in yaml for debugging with auto command +- fix for missing comment in middle of simple map + test + +(0.2) 20141123: +- add ext/_yaml.c etc to the source tree +- tests for yaml to work on 2.6/3.3/3.4 +- change install so that you can include ruamel.yaml instead of ruamel.yaml.py +- add "yaml" utility with initial subcommands (test rt, from json) + +(0.1) 20141122: +- merge py2 and py3 code bases +- remove support for 2.5/3.0/3.1/3.2 (this merge relies on u"" as + available in 3.3 and . imports not available in 2.5) +- tox.ini for 2.7/3.4/2.6/3.3 +- remove lib3/ and tests/lib3 directories and content +- commit +- correct --verbose for test application +- DATA=changed to be relative to __file__ of code +- DATA using os.sep +- remove os.path from imports as os is already imported +- have test_yaml.py exit with value 0 on success, 1 on failures, 2 on + error +- added support for octal integers starting with '0o' + keep support for 01234 as well as 0o1234 +- commit +- added test_roundtrip_data: + requires a .data file and .roundtrip (empty), yaml_load .data + and compare dump against original. 
+- fix grammar as per David Pursehouse: + https://bitbucket.org/xi/pyyaml/pull-request/5/fix-grammar-in-error-messages/diff +- http://www.json.org/ extra escaped char \/ + add .skip-ext as libyaml is not updated +- David Fraser: Extract a method to represent keys in mappings, so that + a subclass can choose not to quote them, used in repesent_mapping + https://bitbucket.org/davidfraser/pyyaml/ +- add CommentToken and percolate through parser and composer and constructor +- add Comments to wrapped mapping and sequence constructs (not to scalars) +- generate YAML with comments +- initial README diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..3f65b07 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ + The MIT License (MIT) + + Copyright (c) 2014-2021 Anthon van der Neut, Ruamel bvba + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. 
diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..fa426d7 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,2 @@ +prune ext* +prune clib* diff --git a/README.rst b/README.rst new file mode 100644 index 0000000..3c49a55 --- /dev/null +++ b/README.rst @@ -0,0 +1,257 @@ +ruyaml +====== + +The ``ruyaml`` package is a fork of ``ruamel.yaml``, created in order to +secure the future of the library, mainly by having a pool of maintainers. + +Notes +===== + +- The current version has the same API as the "ruamel.yaml" package. + However, it will install the `ruyaml` python module. Thus, simply + replace ``from ruamel import yaml`` with ``import ruyaml as yaml`` + (or equivalent) and you're all set. +- python3.6 is the minimal version of python supported + + +:version: 0.90.1 +:updated: 2021-06-10 +:documentation: http://ruyaml.readthedocs.io +:repository: https://github.com/pycontribs/ruyaml.git +:pypi: https://pypi.org/project/ruyaml/ + +*The 0.16.13 release was the last that was tested to be working on Python 2.7. +The 0.17 series will still be tested on Python 3.5, but the 0.18 will not. The +0.17 series will also stop support for the old PyYAML functions, so a `YAML()` instance +will need to be created.* + +*The 0.17 series will also see changes in how comments are attached during +roundtrip. This will result in backwards incompatibilities on the `.ca` data and +it might even be necessary for documented methods that handle comments.* + +*Please adjust your dependencies accordingly if necessary. (`ruamel.yaml<0.17`)* + + +Starting with version 0.15.0 the way YAML files are loaded and dumped +has been changing, see the API doc for details. Currently existing +functionality will throw a warning before being changed/removed. +**For production systems already using a pre 0.16 version, you should +pin the version being used with ``ruamel.yaml<=0.15``** if you cannot +fully test upgrading to a newer version. 
For new usage +pin to the minor version tested ( ``ruamel.yaml<=0.17``) or even to the +exact version used. + +New functionality is usually only available via the new API, so +make sure you use it and stop using the `ruamel.yaml.safe_load()`, +`ruamel.yaml.round_trip_load()` and `ruamel.yaml.load()` functions +(and their `....dump()` counterparts). + +If your package uses ``ruamel.yaml`` and is not listed on PyPI, drop +me an email, preferably with some information on how you use the +package (or a link to the repository) and I'll keep you informed +when the status of the API is stable enough to make the transition. + +* `Overview `_ +* `Installing `_ +* `Basic Usage `_ +* `Details `_ +* `Examples `_ +* `API `_ +* `Differences with PyYAML `_ + +.. image:: https://readthedocs.org/projects/yaml/badge/?version=stable + :target: https://yaml.readthedocs.org/en/stable + +.. image:: https://bestpractices.coreinfrastructure.org/projects/1128/badge + :target: https://bestpractices.coreinfrastructure.org/projects/1128 + +.. image:: https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree/_doc/_static/license.svg?format=raw + :target: https://opensource.org/licenses/MIT + +.. image:: https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree/_doc/_static/pypi.svg?format=raw + :target: https://pypi.org/project/ruamel.yaml/ + +.. image:: https://sourceforge.net/p/oitnb/code/ci/default/tree/_doc/_static/oitnb.svg?format=raw + :target: https://pypi.org/project/oitnb/ + +.. image:: http://www.mypy-lang.org/static/mypy_badge.svg + :target: http://mypy-lang.org/ + +ChangeLog +========= + +.. 
should insert NEXT: at the beginning of line for next key (with empty line) + +0.17.17 (2021-10-31): + - extract timestamp matching/creation to util + +0.17.16 (2021-08-28): + - 398 also handle issue 397 when comment is newline + +0.17.15 (2021-08-28): + - fix issue 397, insert comment before key when a comment between key and value exists + (reported by `Bastien gerard `__) + +0.17.14 (2021-08-25): + - fix issue 396, inserting key/val in merged-in dictionary (reported by `Bastien gerard + `__) + +0.17.13 (2021-08-21): + - minor fix in attr handling + +0.17.12 (2021-08-21): + - fix issue with anchor on registered class not preserved and those classes using package + attrs with `@attr.s()` (both reported by `ssph `__) + +0.17.11 (2021-08-19): + - fix error baseclass for ``DuplicateKeyError`` (reported by `Łukasz Rogalski + `__) + - fix typo in reader error message, causing `KeyError` during reader error + (reported by `MTU `__) + +0.17.10 (2021-06-24): + - fix issue 388, token with old comment structure != two elements + (reported by `Dimitrios Bariamis `__) + +0.17.9 (2021-06-10): + - fix issue with updating CommentedMap (reported by sri on + `StackOverflow `__) + +0.17.8 (2021-06-09): + - fix for issue 387 where templated anchors on tagged object did get set + resulting in potential id reuse. 
(reported by `Artem Ploujnikov + `__) + +0.17.7 (2021-05-31): + - issue 385 also affected other deprecated loaders (reported via email + by Oren Watson) + +0.17.6 (2021-05-31): + - merged type annotations update provided by + `Jochen Sprickerhof `__ + - fix for issue 385: deprecated round_trip_loader function not working + (reported by `Mike Gouline `__) + - wasted a few hours getting rid of mypy warnings/errors + +0.17.5 (2021-05-30): + - fix for issue 384 !!set with aliased entry resulting in broken YAML on rt + reported by `William Kimball `__) + +0.17.4 (2021-04-07): + - prevent (empty) comments from throwing assertion error (issue 351 + reported by `William Kimball `__) + comments (or empty line) will be dropped + +0.17.3 (2021-04-07): + - fix for issue 382 caused by an error in a format string (reported by + `William Kimball `__) + - allow expansion of aliases by setting ``yaml.composer.return_alias = lambda s: copy.deepcopy(s)`` + (as per `Stackoverflow answer `__) + +0.17.2 (2021-03-29): + - change -py2.py3-none-any.whl to -py3-none-any.whl, and remove 0.17.1 + +0.17.1 (2021-03-29): + - added 'Programming Language :: Python :: 3 :: Only', and removing + 0.17.0 from PyPI (reported by `Alasdair Nicol `__) + +0.17.0 (2021-03-26): + - removed because of incomplete classifiers + - this release no longer supports Python 2.7, most if not all Python 2 + specific code is removed. The 0.17.x series is the last to support Python 3.5 + (this also allowed for removal of the dependency on ``ruamel.std.pathlib``) + - remove Python2 specific code branches and adaptations (u-strings) + - prepare % code for f-strings using ``_F`` + - allow PyOxidisation (`issue 324 `__ + resp. 
`issue 171 `__) + - replaced Python 2 compatible enforcement of keyword arguments with '*' + - the old top level *functions* ``load``, ``safe_load``, ``round_trip_load``, + ``dump``, ``safe_dump``, ``round_trip_dump``, ``scan``, ``parse``, + ``compose``, ``emit``, ``serialize`` as well as their ``_all`` variants for + multi-document streams, now issue a ``PendingDeprecationWarning`` (e.g. when run + from pytest, but also when Python is started with ``-Wd``). Use the methods on + ``YAML()``, which have been extended. + - fix for issue 376: indentation changes could put literal/folded scalar to start + before the ``#`` column of a following comment. Effectively making the comment + part of the scalar in the output. (reported by + `Bence Nagy `__) + + +0.16.13 (2021-03-05): + - fix for issue 359: could not update() CommentedMap with keyword arguments + (reported by `Steve Franchak `__) + - fix for issue 365: unable to dump mutated TimeStamp objects + (reported by Anton Akmerov `__) + - fix for issue 371: unable to add comment without starting space + (reported by 'Mark Grandi `__) + - fix for issue 373: recursive call to walk_tree not preserving all params + (reported by `eulores `__) + - a None value in a flow-style sequence is now dumped as `null` instead + of `!!null ''` (reported by mcarans on + `StackOverflow `__) + +0.16.12 (2020-09-04): + - update links in doc + +0.16.11 (2020-09-03): + - workaround issue with setuptools 0.50 and importing pip ( fix by jaraco + https://github.com/pypa/setuptools/issues/2355#issuecomment-685159580 ) + +0.16.10 (2020-02-12): + - (auto) updated image references in README to sourceforge + +0.16.9 (2020-02-11): + - update CHANGES + +0.16.8 (2020-02-11): + - update requirements so that ruamel.yaml.clib is installed for 3.8, + as it has become available (via manylinux builds) + +0.16.7 (2020-01-30): + - fix typechecking issue on TaggedScalar (reported by Jens Nielsen) + - fix error in dumping literal scalar in sequence with comments before 
element + (reported by `EJ Etherington `__) + +0.16.6 (2020-01-20): + - fix empty string mapping key roundtripping with preservation of quotes as `? ''` + (reported via email by Tomer Aharoni). + - fix incorrect state setting in class constructor (reported by `Douglas Raillard + `__) + - adjust deprecation warning test for Hashable, as that no longer warns (reported + by `Jason Montleon `__) + +0.16.5 (2019-08-18): + - allow for ``YAML(typ=['unsafe', 'pytypes'])`` + +0.16.4 (2019-08-16): + - fix output of TAG directives with # (reported by `Thomas Smith + `__) + + +0.16.3 (2019-08-15): + - split construct_object + - change stuff back to keep mypy happy + - move setting of version based on YAML directive to scanner, allowing to + check for file version during TAG directive scanning + +0.16.2 (2019-08-15): + - preserve YAML and TAG directives on roundtrip, correctly output # + in URL for YAML 1.2 (both reported by `Thomas Smith + `__) + +0.16.1 (2019-08-08): + - Force the use of new version of ruamel.yaml.clib (reported by `Alex Joz + `__) + - Allow '#' in tag URI as these are allowed in YAML 1.2 (reported by + `Thomas Smith + `__) + +0.16.0 (2019-07-25): + - split of C source that generates .so file to ruamel.yaml.clib + - duplicate keys are now an error when working with the old API as well + + +---- + +For older changes see the file +`CHANGES `_ diff --git a/_doc/Makefile b/_doc/Makefile new file mode 100644 index 0000000..c5d1aa0 --- /dev/null +++ b/_doc/Makefile @@ -0,0 +1,216 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = a4 +BUILDDIR = _build + +# User-friendly check for sphinx-build +ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) +$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. 
Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) +endif + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . + +.PHONY: help +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " applehelp to make an Apple Help Book" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " xml to make Docutils-native XML files" + @echo " pseudoxml to make pseudoxml-XML files for display purposes" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + @echo " coverage to run coverage check of the documentation (if enabled)" + +.PHONY: clean +clean: + rm -rf $(BUILDDIR)/* + +.PHONY: html +html: + 
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +.PHONY: dirhtml +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +.PHONY: singlehtml +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + +.PHONY: pickle +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +.PHONY: json +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +.PHONY: htmlhelp +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +.PHONY: qthelp +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/yaml.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/yaml.qhc" + +.PHONY: applehelp +applehelp: + $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp + @echo + @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." + @echo "N.B. You won't be able to view it unless you put it in" \ + "~/Library/Documentation/Help or install it in your application" \ + "bundle." + +.PHONY: devhelp +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." 
+ @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/yaml" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/yaml" + @echo "# devhelp" + +.PHONY: epub +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +.PHONY: latex +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +.PHONY: latexpdf +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +.PHONY: latexpdfja +latexpdfja: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through platex and dvipdfmx..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +.PHONY: text +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +.PHONY: man +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +.PHONY: texinfo +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +.PHONY: info +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 
+ +.PHONY: gettext +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." + +.PHONY: changes +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +.PHONY: linkcheck +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +.PHONY: doctest +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." + +.PHONY: coverage +coverage: + $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage + @echo "Testing of coverage in the sources finished, look at the " \ + "results in $(BUILDDIR)/coverage/python.txt." + +.PHONY: xml +xml: + $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml + @echo + @echo "Build finished. The XML files are in $(BUILDDIR)/xml." + +.PHONY: pseudoxml +pseudoxml: + $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml + @echo + @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 
diff --git a/_doc/_static/license.svg b/_doc/_static/license.svg new file mode 100644 index 0000000..43dbd86 --- /dev/null +++ b/_doc/_static/license.svg @@ -0,0 +1 @@ +LicenseLicenseMITMIT diff --git a/_doc/_static/pypi.svg b/_doc/_static/pypi.svg new file mode 100644 index 0000000..35042d8 --- /dev/null +++ b/_doc/_static/pypi.svg @@ -0,0 +1 @@ + pypipypi0.17.170.17.17 diff --git a/_doc/api.rst b/_doc/api.rst new file mode 100644 index 0000000..d64df0d --- /dev/null +++ b/_doc/api.rst @@ -0,0 +1,287 @@ ++++++++++++++++++++++++++++ +Departure from previous API ++++++++++++++++++++++++++++ + +With version 0.15.0 ``ruyaml`` starts to depart from the previous (PyYAML) way +of loading and dumping. During a transition period the original +``load()`` and ``dump()`` in its various formats will still be supported, +but this is not guaranteed to be so with the transition to 1.0. + +At the latest with 1.0, but possible earlier transition error and +warning messages will be issued, so any packages depending on +ruyaml should pin the version with which they are testing. + + +Up to 0.15.0, the loaders (``load()``, ``safe_load()``, +``round_trip_load()``, ``load_all``, etc.) took, apart from the input +stream, a ``version`` argument to allow downgrading to YAML 1.1, +sometimes needed for +documents without directive. When round-tripping, there was an option to +preserve quotes. + +Up to 0.15.0, the dumpers (``dump()``, ``safe_dump``, +``round_trip_dump()``, ``dump_all()``, etc.) had a plethora of +arguments, some inherited from ``PyYAML``, some added in +``ruyaml``. The only required argument is the ``data`` to be +dumped. If the stream argument is not provided to the dumper, then a +string representation is build up in memory and returned to the +caller. + +Starting with 0.15.0 ``load()`` and ``dump()`` are methods on a +``YAML`` instance and only take the stream, +resp. the data and stream argument. 
All other parameters are set on the instance +of ``YAML`` before calling ``load()`` or ``dump()`` + +Before 0.15.0:: + + from pathlib import Path + import ruyaml + + data = ruyaml.safe_load("abc: 1") + out = Path('/tmp/out.yaml') + with out.open('w') as fp: + ruyaml.safe_dump(data, fp, default_flow_style=False) + +after:: + + from pathlib import Path + from ruyaml import YAML + + yaml = YAML(typ='safe') + yaml.default_flow_style = False + data = yaml.load("abc: 1") + out = Path('/tmp/out.yaml') + yaml.dump(data, out) + +If you previously used a keyword argument ``explicit_start=True`` you +now do ``yaml.explicit_start = True`` before calling ``dump()``. The +``Loader`` and ``Dumper`` keyword arguments are not supported that +way. You can provide the ``typ`` keyword to ``rt`` (default), +``safe``, ``unsafe`` or ``base`` (for round-trip load/dump, safe_load/dump, +load/dump resp. using the BaseLoader / BaseDumper. More fine-control +is possible by setting the attributes ``.Parser``, ``.Constructor``, +``.Emitter``, etc., to the class of the type to create for that stage +(typically a subclass of an existing class implementing that). + +The default loader (``typ='rt'``) is a direct derivative of the safe loader, without the +methods to construct arbitrary Python objects that make the ``unsafe`` loader +unsafe, but with the changes needed for round-trip preservation of comments, +etc.. For trusted Python classes a constructor can of course be added to the round-trip +or safe-loader, but this has to be done explicitly (``add_constructor``). + +All data is dumped (not just for round-trip-mode) with ``.allow_unicode += True`` + +You can of course have multiple YAML instances active at the same +time, with different load and/or dump behaviour. + +Initially only the typical operations are supported, but in principle +all functionality of the old interface will be available via +``YAML`` instances (if you are using something that isn't let me know). 
+ +If a parse or dump fails, and throws an exception, the state of the +``YAML()`` instance is not guaranteed to be able to handle further +processing. You should, at that point, recreate the YAML instance before +proceeding. + + +Loading ++++++++ + +Duplicate keys +^^^^^^^^^^^^^^ + +In JSON mapping keys should be unique, in YAML they must be unique. +PyYAML never enforced this although the YAML 1.1 specification already +required this. + +In the new API (starting 0.15.1) duplicate keys in mappings are no longer allowed by +default. To allow duplicate keys in mappings:: + + yaml = ruyaml.YAML() + yaml.allow_duplicate_keys = True + yaml.load(stream) + +In the old API this is a warning starting with 0.15.2 and an error in +0.16.0. + +When a duplicate key is found it and its value are discarded, as should be done +according to the `YAML 1.1 specification `__. + +Dumping a multi-documents YAML stream ++++++++++++++++++++++++++++++++++++++ + +The "normal" ``dump_all`` expected as first element a list of documents, or +something else the internals of the method can iterate over. To read +and write a multi-document you would either make a ``list``:: + + yaml = YAML() + data = list(yaml.load_all(in_path)) + # do something on data[0], data[1], etc. + yaml.dump_all(data, out_path) + + +or create some function/object that would yield the ``data`` values. + +What you now can do is create ``YAML()`` as a context manager. This +works for output (dumping) only, requires you to specify the output +(file, buffer, ``Path``) at creation time, and doesn't support +``transform`` (yet). + +:: + + with YAML(output=sys.stdout) as yaml: + yaml.explicit_start = True + for data in yaml.load_all(Path(multi_document_filename)): + # do something on data + yaml.dump(data) + + +Within the context manager, you cannot use the ``dump()`` with a +second (stream) argument, nor can you use ``dump_all()``. 
The +``dump()`` within the context of the ``YAML()`` automatically creates +multi-document if called more than once. + +To combine multiple YAML documents from multiple files: + +:: + + list_of_filenames = ['x.yaml', 'y.yaml', ] + with YAML(output=sys.stdout) as yaml: + yaml.explicit_start = True + for path in list_of_filename: + with open(path) as fp: + yaml.dump(yaml.load(fp)) + + +The output will be a valid, uniformly indented YAML file. Doing +``cat {x,y}.yaml`` might result in a single document if there is not +document start marker at the beginning of ``y.yaml`` + + + + +Dumping ++++++++ + +Controls +^^^^^^^^ + +On your ``YAML()`` instance you can set attributes e.g with:: + + yaml = YAML(typ='safe', pure=True) + yaml.allow_unicode = False + +available attributes include: + +``unicode_supplementary`` + Defaults to ``True`` if Python's Unicode size is larger than 2 bytes. Set to ``False`` to + enforce output of the form ``\U0001f601`` (ignored if ``allow_unicode`` is ``False``) + +Transparent usage of new and old API +++++++++++++++++++++++++++++++++++++ + +If you have multiple packages depending on ``ruyaml``, or install +your utility together with other packages not under your control, then +fixing your ``install_requires`` might not be so easy. + +Depending on your usage you might be able to "version" your usage to +be compatible with both the old and the new. The following are some +examples all assuming ``import ruyaml`` somewhere at the top +of your file and some ``istream`` and ``ostream`` apropriately opened +for reading resp. writing. + + +Loading and dumping using the ``SafeLoader``:: + + yml = ruyaml.YAML(typ='safe', pure=True) # 'safe' load and dump + data = yml.load(istream) + yml.dump(data, ostream) + +Loading with the ``CSafeLoader``, dumping with +``RoundTripLoader``. 
You need two ``YAML`` instances, but each of them +can be re-used:: + + yml = ruyaml.YAML(typ='safe') + data = yml.load(istream) + ymlo = ruyaml.YAML() # or yaml.YAML(typ='rt') + ymlo.width = 1000 + ymlo.explicit_start = True + ymlo.dump(data, ostream) + +Loading and dumping from ``pathlib.Path`` instances using the +round-trip-loader:: + + # in myyaml.py + class MyYAML(yaml.YAML): + def __init__(self): + yaml.YAML.__init__(self) + self.preserve_quotes = True + self.indent(mapping=4, sequence=4, offset=2) + # in your code + from myyaml import MyYAML + + # some pathlib.Path + from pathlib import Path + inf = Path('/tmp/in.yaml') + outf = Path('/tmp/out.yaml') + + yml = MyYAML() + # no need for with statement when using pathlib.Path instances + data = yml.load(inf) + yml.dump(data, outf) + ++++++++++++++++++++++ +Reason for API change ++++++++++++++++++++++ + +``ruyaml`` inherited the way of doing things from ``PyYAML``. In +particular when calling the function ``load()`` or ``dump()`` +temporary instances of ``Loader()`` resp. ``Dumper()`` were +created that were discarded on termination of the function. + +This way of doing things leads to several problems: + +- it is virtually impossible to return information to the caller apart from the + constructed data structure. E.g. if you would get a YAML document + version number from a directive, there is no way to let the caller + know apart from handing back special data structures. The same + problem exists when trying to do on the fly + analysis of a document for indentation width. + +- these instances were composites of the various load/dump steps and + if you wanted to enhance one of the steps, you needed e.g. subclass + the emitter and make a new composite (dumper) as well, providing all + of the parameters (i.e. copy paste) + + Alternatives, like making a class that returned a ``Dumper`` when + called and sets attributes before doing so, is cumbersome for + day-to-day use. 
+ +- many routines (like ``add_representer()``) have a direct global + impact on all of the following calls to ``dump()`` and those are + difficult if not impossible to turn back. This forces the need to + subclass ``Loaders`` and ``Dumpers``, a long time problem in PyYAML + as some attributes were not ``deep_copied`` although a bug-report + (and fix) had been available a long time. + +- If you want to set an attribute, e.g. to control whether literal + block style scalars are allowed to have trailing spaces on a line + instead of being dumped as double quoted scalars, you have to change + the ``dump()`` family of routines, all of the ``Dumpers()`` as well + as the actual functionality change in ``emitter.Emitter()``. The + functionality change takes changing 4 (four!) lines in one file, and being able + to enable that another 50+ line changes (non-contiguous) in 3 more files resulting + in diff that is far over 200 lines long. + +- replacing libyaml with something that doesn't both support ``0o52`` + and ``052`` for the integer ``42`` (instead of ``52`` as per YAML 1.2) + is difficult + + +With ``ruyaml>=0.15.0`` the various steps "know" about the +``YAML`` instance and can pick up setting, as well as report back +information via that instance. Representers, etc., are added to a +reusable instance and different YAML instances can co-exists. + +This change eases development and helps prevent regressions. diff --git a/_doc/basicuse.rst b/_doc/basicuse.rst new file mode 100644 index 0000000..5b4f9a8 --- /dev/null +++ b/_doc/basicuse.rst @@ -0,0 +1,55 @@ +*********** +Basic Usage +*********** + +You load a YAML document using:: + + from ruyaml import YAML + + yaml=YAML(typ='safe') # default, if not specfied, is 'rt' (round-trip) + yaml.load(doc) + +in this ``doc`` can be a file pointer (i.e. an object that has the +``.read()`` method, a string or a ``pathlib.Path()``. 
``typ='safe'`` +accomplishes the same as what ``safe_load()`` did before: loading of a +document without resolving unknown tags. Provide ``pure=True`` to +enforce using the pure Python implementation, otherwise the faster C libraries will be used +when possible/available but these behave slightly different (and sometimes more like a YAML 1.1 loader). + +Dumping works in the same way:: + + from ruyaml import YAML + + yaml=YAML() + yaml.default_flow_style = False + yaml.dump({'a': [1, 2]}, s) + +in this ``s`` can be a file pointer (i.e. an object that has the +``.write()`` method, or a ``pathlib.Path()``. If you want to display +your output, just stream to ``sys.stdout``. + +If you need to transform a string representation of the output provide +a function that takes a string as input and returns one:: + + def tr(s): + return s.replace('\n', '<\n') # such output is not valid YAML! + + yaml.dump(data, sys.stdout, transform=tr) + +More examples +============= + +Using the C based SafeLoader (at this time is inherited from +libyaml/PyYAML and e.g. loads ``0o52`` as well as ``052`` load as integer ``42``):: + + from ruyaml import YAML + + yaml=YAML(typ="safe") + yaml.load("""a:\n b: 2\n c: 3\n""") + +Using the Python based SafeLoader (YAML 1.2 support, ``052`` loads as ``52``):: + + from ruyaml import YAML + + yaml=YAML(typ="safe", pure=True) + yaml.load("""a:\n b: 2\n c: 3\n""") diff --git a/_doc/conf.py b/_doc/conf.py new file mode 100644 index 0000000..0a8d2af --- /dev/null +++ b/_doc/conf.py @@ -0,0 +1,298 @@ +# -*- coding: utf-8 -*- +# +# yaml documentation build configuration file, created by +# sphinx-quickstart on Mon Feb 29 12:03:00 2016. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. 
+ +import os # NOQA +import sys # NOQA + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# source_suffix = ['.rst', '.md'] +source_suffix = {".rst": "restructuredtext"} + +# The encoding of source files. +# source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = "index" + +# General information about the project. +project = u"ruyaml" +copyright = u"2017-2021, Anthon van der Neut and other contributors" +author = u"Anthon van der Neut et al." + +# The version info for the project you are documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +try: + from ruyaml import __version__, version_info # NOQA + + # The short X.Y version. + version = '.'.join([str(ch) for ch in version_info[:3]]) + # The full version, including alpha/beta/rc tags. + release = version # = __version__ +except Exception as e: + print("exception", e) + version = release = "dev" +print("ruyaml version", version) +# print('cwd:', os.getcwd()) +# current working directory is the one with `conf.py` ! + + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. 
+# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ["_build"] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = "sphinx" + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = "default" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. 
If None, it defaults to +# " v documentation". +html_title = "Python YAML package documentation" + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +# html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." 
is shown in the HTML footer. Default is True. +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +# html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# Now only 'ja' uses this config value +# html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +# html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = "yamldoc" + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + "papersize": "a4paper", + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # 'preamble': '', + # Latex figure (float) alignment + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ( + master_doc, + 'yaml.tex', + 'Python YAML package documentation', + 'Anthon van der Neut', + 'manual', + ) +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. 
+# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [(master_doc, 'yaml', 'yaml Documentation', [author], 1)] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + master_doc, + 'yaml', + 'yaml Documentation', + author, + "yaml", + "One line description of project.", + "Miscellaneous", + ) +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# texinfo_no_detailmenu = False diff --git a/_doc/contributing.rst b/_doc/contributing.rst new file mode 100644 index 0000000..a071851 --- /dev/null +++ b/_doc/contributing.rst @@ -0,0 +1,80 @@ +************ +Contributing +************ + +All contributions to ``ruyaml`` are welcome. +Please post an issue or, if possible, a pull request (PR) on github. + +Please don't use issues to post support questions. + +TODO:: The maintainers of ruyaml don't have an official support channel yet. 
+ +Documentation +============= + +The documentation for ``ruyaml`` is written in the `ReStructured Text +`_ format and follows the `Sphinx +Document Generator `_'s conventions. + +Code +==== + +Code changes are welcome as well, but anything beyond a minor change should be +tested (``tox``/``pytest``), checked for typing conformance (``mypy``) and pass +pep8 conformance (``flake8``). + +In my experience it is best to use two ``virtualenv`` environments, one with the +latest Python from the 2.7 series, the other with 3.5 or 3.6. In the +site-packages directory of each virtualenv make a soft link to the ruyaml +directory of your (cloned and checked out) copy of the repository. Do not under +any circumstances run ``pip install -e .`` it will +not work (at least not until these commands are fixed to support packages with +namespaces). + +You can install ``tox``, ``pytest``, ``mypy`` and ``flake8`` in the Python3 +``virtualenv``, or in a ``virtualenv`` of their own. If all of these commands +pass without warning/error, you can create your pull-request. + +Flake ++++++ + +The `Flake8 `_ configuration is part of ``setup.cfg``:: + + [flake8] + show-source = True + max-line-length = 95 + ignore = F405 + +The suppress of F405 is necessary to allow ``from xxx import *``. + +Please make sure your checked out source passes ``flake8`` without test (it should). +Then make your changes pass without any warnings/errors. + +Tox/pytest +++++++++++ + +Whether you add something or fix some bug with your code changes, first add one +or more tests that fail in the unmodified source when running ``tox``. Once that +is in place add your code, which should have as a result that your added test(s) +no longer fail, and neither should any other existing tests. + +Typing/mypy ++++++++++++ + +You should run ``mypy`` from ``ruyaml``'s source directory:: + + mypy --strict --follow-imports silent lib/ruyaml/*.py + +This command should give no errors or warnings. 
+ + +Vulnerabilities +=============== + +If you find a vulnerability in ``ruyaml`` (e.g. that would show the ``safe`` +and ``rt`` loader are not safe due to a bug in the software)), please contact +the maintainers directly via email. + +After the vulnerability is removed, and affected parties notified to allow them +to update versions, the vulnerability will be published, and your role in +finding/resolving this properly attributed. diff --git a/_doc/contributing.ryd b/_doc/contributing.ryd new file mode 100644 index 0000000..6632472 --- /dev/null +++ b/_doc/contributing.ryd @@ -0,0 +1,133 @@ +version: 0.1 +output: rst +fix_inline_single_backquotes: true +pdf: true +--- | +************ +Contributing +************ + +Any contribution to ``ruamel.yaml`` is welcome, be it in the form of an +email, a question on stackoverflow (I'll get notified of that when you tag it +with ``ruamel.yaml``), an issue or pull-request (PR) on sourceforge. + +Contributing via stackoverflow is, for most, easiest to make. When I answer your +question there and the answer warrants an extension to the documentation or +code, I will include it in a documnetation update and/or future (normally the +next) release of ``ruamel.yaml``. + +Please don't post support questions as an issue on sourceforge. + +Documentation +============= + +The documentation for ``ruamel.yaml`` is in YAML, more specifically in `ryd +`_ ( /rɑɪt/, pronounced like the verb “write†+). This is reStructuredText mixed with Python, each in separate YAML documents +within a single file. If you know a bit of YAML, Python and reStructuredText it +will be clear how that works. + +If you want to contribute to the documentation, you can sent me a clear +description of the needed changes, e.g. as a unified diff. 
If the changes +encompass multiple documents in a ``.ryd`` file, it is best to install ``ryd`` +(use a virtualenv!), clone the ``ruamel.yaml`` repository on sourceforge, edit +documentation, run ``ryd``:: + + ryd --pdf '**/*.ryd' + +(quoting might not be necessary depending on your shell), and once the PDF(s) +look acceptable, submit a pull-request. + +``ryd`` will check your file for single backquotes (my most common mistake going +back and forth between reStructuredText and other mark up). + +If you contribute example programs, note that ``ryd`` will automatically run you +program (so it should be correct) and can include the output of the program in +the resulting ``.rst`` (and PDF) file. + +Code +==== + +Code changes are welcome as well, but anything beyond a minor change should be +tested (``tox``/``pytest``), checked for typing conformance (``mypy``) and pass +pep8 conformance (``flake8``). + +In my experience it is best to use two ``virtualenv`` environments, one with the +latest Python from the 2.7 series, the other with 3.5 or 3.6. In the +site-packages directory of each virtualenv make a soft link to the ruamel +directory of your (cloned and checked out) copy of the repository. Do not under +any circumstances run ``pip install -e .`` it will +not work (at least not until these commands are fixed to support packages with +namespaces). + +You can install ``tox``, ``pytest``, ``mypy`` and ``flake8`` in the Python3 +``virtualenv``, or in a ``virtualenv`` of their own. If all of these commands +pass without warning/error, you can create your pull-request. + +Flake ++++++ + +My ``~/.config/flake8`` file:: + + [flake8] + show-source = True + max-line-length = 95 + ignore = F405 + +The suppress of F405 is necessary to allow ``from xxx import *``, which I have +not removed in all places (yet). + +First make sure your checked out source passes ``flake8`` without test (it should). +Then make your changes pass without any warnings/errors. 
+ +Tox/pytest +++++++++++ + +Whether you add something or fix some bug with your code changes, first add one +or more tests that fail in the unmodified source when running ``tox``. Once that +is in place add your code, which should have as a result that your added test(s) +no longer fail, and neither should any other existing tests. + +Typing/mypy ++++++++++++ + +If you add methods or functions to ``ruamel.yaml``, you will need to add Python +2.7 compatible typing information in order for ``mypy`` to pass without error. + +I run ``mypy`` from the directory where the (link to) ruamel directory is +using:: + + mypy --py2 --strict --follow-imports silent ruamel/yaml/*.py + +This should give no errors or warnings + + +Generated files +=============== + +I use a minimal environment when developing, void of most artifacts needed for +packaging, testing etc. These artifact files are *generated*, just before committing to +sourceforge and pushing to PyPI, with nuances coming from the ``_package_data`` +information in ``__init__.py``. Including changes in these files will +automatically be reverted, even assuming your PR is accepted as is. + +Consider the following files **read-only** (if you think changes need to made these, +contact me):: + + tox.ini + LICENSE + _ryd/conf.py + -ryd/Makefile + + +Vulnerabilities +=============== + +If you find a vulnerability in ``ruamel.yaml`` (e.g. that would show the ``safe`` +and ``rt`` loader are not safe due to a bug in the software)), please contact me +directly via email, or by leaving a comment on StackOverflow (below any of my +posts), without going into the details of the vulnerability. After contact is +estabilished I will work to eliminate the vulnerability in a timely fashion. +After the vulnerability is removed, and affected parties notified to allow them +to update versions, the vulnerability will be published, and your role in +finding/resolving this properly attributed. 
diff --git a/_doc/detail.rst b/_doc/detail.rst new file mode 100644 index 0000000..2f7d682 --- /dev/null +++ b/_doc/detail.rst @@ -0,0 +1,289 @@ +******* +Details +******* + + + +- support for simple lists as mapping keys by transforming these to tuples +- ``!!omap`` generates ordereddict (C) on Python 2, collections.OrderedDict + on Python 3, and ``!!omap`` is generated for these types. +- Tests whether the C yaml library is installed as well as the header + files. That library doesn't generate CommentTokens, so it cannot be used to + do round trip editing on comments. It can be used to speed up normal + processing (so you don't need to install ``ruyaml`` and ``PyYaml``). + See the section *Optional requirements*. +- Basic support for multiline strings with preserved newlines and + chomping ( '``|``', '``|+``', '``|-``' ). As this subclasses the string type + the information is lost on reassignment. (This might be changed + in the future so that the preservation/folding/chomping is part of the + parent container, like comments). +- anchors names that are hand-crafted (not of the form``idNNN``) are preserved +- `merges `_ in dictionaries are preserved +- adding/replacing comments on block-style sequences and mappings + with smart column positioning +- collection objects (when read in via RoundTripParser) have an ``lc`` + property that contains line and column info ``lc.line`` and ``lc.col``. + Individual positions for mappings and sequences can also be retrieved + (``lc.key('a')``, ``lc.value('a')`` resp. ``lc.item(3)``) +- preservation of whitelines after block scalars. Contributed by Sam Thursfield. 
+ +*In the following examples it is assumed you have done something like:*:: + + from ruyaml import YAML + yaml = YAML() + +*if not explicitly specified.* + +Indentation of block sequences +============================== + +Although ruyaml doesn't preserve individual indentations of block sequence +items, it does properly dump:: + + x: + - b: 1 + - 2 + +back to:: + + x: + - b: 1 + - 2 + +if you specify ``yaml.indent(sequence=4)`` (indentation is counted to the +beginning of the sequence element). + +PyYAML (and older versions of ruyaml) gives you non-indented +scalars (when specifying default_flow_style=False):: + + x: + - b: 1 + - 2 + +You can use ``mapping=4`` to also have the mappings values indented. +The dump also observes an additional ``offset=2`` setting that +can be used to push the dash inwards, *within the space defined by* ``sequence``. + +The above example with the often seen ``yaml.indent(mapping=2, sequence=4, offset=2)`` +indentation:: + + x: + y: + - b: 1 + - 2 + +The defaults are as if you specified ``yaml.indent(mapping=2, sequence=2, offset=0)``. + +If the ``offset`` equals ``sequence``, there is not enough +room for the dash and the space that has to follow it. In that case the +element itself would normally be pushed to the next line (and older versions +of ruyaml did so). But this is +prevented from happening. However the ``indent`` level is what is used +for calculating the cumulative indent for deeper levels and specifying +``sequence=3`` resp. ``offset=2``, might give correct, but counter +intuitive results. + +**It is best to always have** ``sequence >= offset + 2`` +**but this is not enforced**. Depending on your structure, not following +this advice **might lead to invalid output**. + +Inconsistently indented YAML +++++++++++++++++++++++++++++ + +If your input is inconsistently indented, such indentation cannot be preserved. +The first round-trip will make it consistent/normalize it. Here are some +inconsistently indented YAML examples. 
+ +``b`` indented 3, ``c`` indented 4 positions:: + + a: + b: + c: 1 + +Top level sequence is indented 2 without offset, the other sequence 4 (with offset 2):: + + - key: + - foo + - bar + + +Positioning ':' in top level mappings, prefixing ':' +==================================================== + +If you want your toplevel mappings to look like:: + + library version: 1 + comment : | + this is just a first try + +then set ``yaml.top_level_colon_align = True`` +(and ``yaml.indent = 4``). ``True`` causes calculation based on the longest key, +but you can also explicitly set a number. + +If you want an extra space between a mapping key and the colon specify +``yaml.prefix_colon = ' '``:: + + - https://myurl/abc.tar.xz : 23445 + # ^ extra space here + - https://myurl/def.tar.xz : 944 + +If you combine ``prefix_colon`` with ``top_level_colon_align``, the +top level mapping doesn't get the extra prefix. If you want that +anyway, specify ``yaml.top_level_colon_align = 12`` where ``12`` has to be an +integer that is one more than length of the widest key. + + +Document version support +++++++++++++++++++++++++ + +In YAML a document version can be explicitly set by using:: + + %YAML 1.x + +before the document start (at the top or before a +``---``). For ``ruyaml`` x has to be 1 or 2. If no explicit +version is set `version 1.2 `_ +is assumed (which has been released in 2009). + +The 1.2 version does **not** support: + +- sexagesimals like ``12:34:56`` +- octals that start with 0 only: like ``012`` for number 10 (``0o12`` **is** + supported by YAML 1.2) +- Unquoted Yes and On as alternatives for True and No and Off for False. 
+ +If you cannot change your YAML files and you need them to load as 1.1 +you can load with ``yaml.version = (1, 1)``, +or the equivalent (version can be a tuple, list or string) ``yaml.version = "1.1"`` + +*If you cannot change your code, stick with ruyaml==0.10.23 and let +me know if it would help to be able to set an environment variable.* + +This does not affect dump as ruyaml never emitted sexagesimals, nor +octal numbers, and emitted booleans always as true resp. false + +Round trip including comments ++++++++++++++++++++++++++++++ + +The major motivation for this fork is the round-trip capability for +comments. The integration of the sources was just an initial step to +make this easier. + +adding/replacing comments +^^^^^^^^^^^^^^^^^^^^^^^^^ + +Starting with version 0.8, you can add/replace comments on block style +collections (mappings/sequences resuting in Python dict/list). The basic +for for this is:: + + from __future__ import print_function + + import sys + import ruyaml + + yaml = ruyaml.YAML() # defaults to round-trip + + inp = """\ + abc: + - a # comment 1 + xyz: + a: 1 # comment 2 + b: 2 + c: 3 + d: 4 + e: 5 + f: 6 # comment 3 + """ + + data = yaml.load(inp) + data['abc'].append('b') + data['abc'].yaml_add_eol_comment('comment 4', 1) # takes column of comment 1 + data['xyz'].yaml_add_eol_comment('comment 5', 'c') # takes column of comment 2 + data['xyz'].yaml_add_eol_comment('comment 6', 'e') # takes column of comment 3 + data['xyz'].yaml_add_eol_comment('comment 7', 'd', column=20) + + yaml.dump(data, sys.stdout) + +Resulting in:: + + abc: + - a # comment 1 + - b # comment 4 + xyz: + a: 1 # comment 2 + b: 2 + c: 3 # comment 5 + d: 4 # comment 7 + e: 5 # comment 6 + f: 6 # comment 3 + +If the comment doesn't start with '#', this will be added. The key is +the element index for list, the actual key for dictionaries. 
As can be seen +from the example, the column to choose for a comment is derived +from the previous, next or preceding comment column (picking the first one +found). + +Config file formats ++++++++++++++++++++ + +There are only a few configuration file formats that are easily +readable and editable: JSON, INI/ConfigParser, YAML (XML is too cluttered +to be called easily readable). + +Unfortunately `JSON `_ doesn't support comments, +and although there are some solutions with pre-processed filtering of +comments, there are no libraries that support round trip updating of +such commented files. + +INI files support comments, and the excellent `ConfigObj +`_ library by Foord +and Larosa even supports round trip editing with comment preservation, +nesting of sections and limited lists (within a value). Retrieval of +particular value format is explicit (and extensible). + +YAML has basic mapping and sequence structures as well as support for +ordered mappings and sets. It supports scalars of various types +including dates and datetimes (missing in JSON). +YAML has comments, but these are normally thrown away. + +Block structured YAML is a clean and very human readable +format. By extending the Python YAML parser to support round trip +preservation of comments, it makes YAML a very good choice for +configuration files that are human readable and editable while at +the same time interpretable and modifiable by a program. + +Extending ++++++++++ + +There are normally eight files involved when extending the roundtrip +capabilities: the reader, parser, composer and constructor to go from YAML to +Python and the resolver, representer, serializer and emitter to go the other +way. + +Extending involves keeping extra data around for the next process step, +eventually resulting in a different Python object (subclass or alternative), +that should behave like the original, but on the way from Python to YAML +generates the original (or at least something much closer).
+ +Smartening +++++++++++ + +When you use round-tripping, then the complex data you get are +already subclasses of the built-in types. So you can patch +in extra methods or override existing ones. Some methods are already +included and you can do:: + + yaml_str = """\ + a: + - b: + c: 42 + - d: + f: 196 + e: + g: 3.14 + """ + + + data = yaml.load(yaml_str) + + assert data.mlget(['a', 1, 'd', 'f'], list_ok=True) == 196 diff --git a/_doc/dumpcls.rst b/_doc/dumpcls.rst new file mode 100644 index 0000000..8f97c13 --- /dev/null +++ b/_doc/dumpcls.rst @@ -0,0 +1,101 @@ + +********************** +Dumping Python classes +********************** + +Only ``yaml = YAML(typ='unsafe')`` loads and dumps Python objects out-of-the-box. And +since it loads **any** Python object, this can be unsafe. + +If you have instances of some class(es) that you want to dump or load, it is +easy to allow the YAML instance to do that explicitly. You can either register the +class with the ``YAML`` instance or decorate the class. + +Registering is done with ``YAML.register_class()``:: + + import sys + import ruyaml + + + class User: + def __init__(self, name, age): + self.name = name + self.age = age + + + yaml = ruyaml.YAML() + yaml.register_class(User) + yaml.dump([User('Anthon', 18)], sys.stdout) + +which gives as output:: + + - !User + name: Anthon + age: 18 + +The tag ``!User`` originates from the name of the class. + +You can specify a different tag by adding the attribute ``yaml_tag``, and +explicitly specify dump and/or load *classmethods* which have to be called +``to_yaml`` resp.
``from_yaml``:: + + import sys + import ruyaml + + + class User: + yaml_tag = u'!user' + + def __init__(self, name, age): + self.name = name + self.age = age + + @classmethod + def to_yaml(cls, representer, node): + return representer.represent_scalar(cls.yaml_tag, + u'{.name}-{.age}'.format(node, node)) + + @classmethod + def from_yaml(cls, constructor, node): + return cls(*node.value.split('-')) + + + yaml = ruyaml.YAML() + yaml.register_class(User) + yaml.dump([User('Anthon', 18)], sys.stdout) + +which gives as output:: + + - !user Anthon-18 + + +When using the decorator, which takes the ``YAML()`` instance as a parameter, +the ``yaml = YAML()`` line needs to be moved up in the file:: + + import sys + from ruyaml import YAML, yaml_object + + yaml = YAML() + + + @yaml_object(yaml) + class User: + yaml_tag = u'!user' + + def __init__(self, name, age): + self.name = name + self.age = age + + @classmethod + def to_yaml(cls, representer, node): + return representer.represent_scalar(cls.yaml_tag, + u'{.name}-{.age}'.format(node, node)) + + @classmethod + def from_yaml(cls, constructor, node): + return cls(*node.value.split('-')) + + + yaml.dump([User('Anthon', 18)], sys.stdout) + +The ``yaml_tag``, ``from_yaml`` and ``to_yaml`` work in the same way as when using +``.register_class()``. diff --git a/_doc/dumpcls.ryd b/_doc/dumpcls.ryd new file mode 100644 index 0000000..929d5f5 --- /dev/null +++ b/_doc/dumpcls.ryd @@ -0,0 +1,107 @@ +version: 0.1 +output: rst +fix_inline_single_backquotes: true +pdf: true +# code_directory: ../_example +--- | + +********************** +Dumping Python classes +********************** + +Only ``yaml = YAML(typ='unsafe')`` loads and dumps Python objects out-of-the-box. And +since it loads **any** Python object, this can be unsafe. + +If you have instances of some class(es) that you want to dump or load, it is +easy to allow the YAML instance to do that explicitly. 
You can either register the +class with the ``YAML`` instance or decorate the class. + +Registering is done with ``YAML.register_class()``:: + +--- !python | + +import sys +import ruamel.yaml + + +class User(object): + def __init__(self, name, age): + self.name = name + self.age = age + + +yaml = ruamel.yaml.YAML() +yaml.register_class(User) +yaml.dump([User('Anthon', 18)], sys.stdout) +--- !stdout | +which gives as output:: +--- | +The tag ``!User`` originates from the name of the class. + +You can specify a different tag by adding the attribute ``yaml_tag``, and +explicitly specify dump and/or load *classmethods* which have to be called +``to_yaml`` resp. ``from_yaml``:: + +--- !python | +import sys +import ruamel.yaml + + +class User: + yaml_tag = u'!user' + + def __init__(self, name, age): + self.name = name + self.age = age + + @classmethod + def to_yaml(cls, representer, node): + return representer.represent_scalar(cls.yaml_tag, + u'{.name}-{.age}'.format(node, node)) + + @classmethod + def from_yaml(cls, constructor, node): + return cls(*node.value.split('-')) + + +yaml = ruamel.yaml.YAML() +yaml.register_class(User) +yaml.dump([User('Anthon', 18)], sys.stdout) +--- !stdout | +which gives as output:: + +--- | + +When using the decorator, which takes the ``YAML()`` instance as a parameter, +the ``yaml = YAML()`` line needs to be moved up in the file:: + +--- !python | +import sys +from ruamel.yaml import YAML, yaml_object + +yaml = YAML() + + +@yaml_object(yaml) +class User: + yaml_tag = u'!user' + + def __init__(self, name, age): + self.name = name + self.age = age + + @classmethod + def to_yaml(cls, representer, node): + return representer.represent_scalar(cls.yaml_tag, + u'{.name}-{.age}'.format(node, node)) + + @classmethod + def from_yaml(cls, constructor, node): + return cls(*node.value.split('-')) + + +yaml.dump([User('Anthon', 18)], sys.stdout) + +--- | +The ``yaml_tag``, ``from_yaml`` and ``to_yaml`` work in the same way as when using 
+``.register_class()``. diff --git a/_doc/example.rst b/_doc/example.rst new file mode 100644 index 0000000..e9bdeaa --- /dev/null +++ b/_doc/example.rst @@ -0,0 +1,332 @@ +******** +Examples +******** + +Basic round trip of parsing YAML to Python objects, modifying +and generating YAML:: + + import sys + from ruyaml import YAML + + inp = """\ + # example + name: + # details + family: Smith # very common + given: Alice # one of the siblings + """ + + yaml = YAML() + code = yaml.load(inp) + code['name']['given'] = 'Bob' + + yaml.dump(code, sys.stdout) + +Resulting in:: + + # example + name: + # details + family: Smith # very common + given: Bob # one of the siblings + +with the old API:: + + from __future__ import print_function + + import sys + import ruyaml + + inp = """\ + # example + name: + # details + family: Smith # very common + given: Alice # one of the siblings + """ + + code = ruyaml.load(inp, ruyaml.RoundTripLoader) + code['name']['given'] = 'Bob' + + ruyaml.dump(code, sys.stdout, Dumper=ruyaml.RoundTripDumper) + + # the last statement can be done less efficient in time and memory with + # leaving out the end='' would cause a double newline at the end + # print(ruyaml.dump(code, Dumper=ruyaml.RoundTripDumper), end='') + +Resulting in :: + + # example + name: + # details + family: Smith # very common + given: Bob # one of the siblings + +---- + +YAML handcrafted anchors and references as well as key merging +are preserved. 
The merged keys can transparently be accessed +using ``[]`` and ``.get()``:: + + from ruyaml import YAML + + inp = """\ + - &CENTER {x: 1, y: 2} + - &LEFT {x: 0, y: 2} + - &BIG {r: 10} + - &SMALL {r: 1} + # All the following maps are equal: + # Explicit keys + - x: 1 + y: 2 + r: 10 + label: center/big + # Merge one map + - <<: *CENTER + r: 10 + label: center/big + # Merge multiple maps + - <<: [*CENTER, *BIG] + label: center/big + # Override + - <<: [*BIG, *LEFT, *SMALL] + x: 1 + label: center/big + """ + + yaml = YAML() + data = yaml.load(inp) + assert data[7]['y'] == 2 + + +The ``CommentedMap``, which is the ``dict`` like construct one gets when round-trip loading, +supports insertion of a key into a particular position, while optionally adding a comment:: + + import sys + from ruyaml import YAML + + yaml_str = """\ + first_name: Art + occupation: Architect # This is an occupation comment + about: Art Vandelay is a fictional character that George invents... + """ + + yaml = YAML() + data = yaml.load(yaml_str) + data.insert(1, 'last name', 'Vandelay', comment="new key") + yaml.dump(data, sys.stdout) + +gives:: + + first_name: Art + last name: Vandelay # new key + occupation: Architect # This is an occupation comment + about: Art Vandelay is a fictional character that George invents... + +Please note that the comment is aligned with that of its neighbour (if available). + +The above was inspired by a `question `_ +posted by *demux* on StackOverflow. + +---- + +By default ``ruyaml`` indents with two positions in block style, for +both mappings and sequences. For sequences the indent is counted to the +beginning of the scalar, with the dash taking the first position of the +indented "space". + +You can change this default indentation by e.g. 
using ``yaml.indent()``:: + + import sys + from ruyaml import YAML + + d = dict(a=dict(b=2),c=[3, 4]) + yaml = YAML() + yaml.dump(d, sys.stdout) + print('0123456789') + yaml = YAML() + yaml.indent(mapping=4, sequence=6, offset=3) + yaml.dump(d, sys.stdout) + print('0123456789') + + +giving:: + + a: + b: 2 + c: + - 3 + - 4 + 0123456789 + a: + b: 2 + c: + - 3 + - 4 + 0123456789 + + +If a block sequence or block mapping is the element of a sequence, they +are, by default, displayed in `compact +`__ notation. This means +that the dash of the "parent" sequence is on the same line as the +first element resp. first key/value pair of the child collection. + +If you want either or both of these (sequence within sequence, mapping +within sequence) to begin on the next line use ``yaml.compact()``:: + + import sys + from ruyaml import YAML + + d = [dict(b=2), [3, 4]] + yaml = YAML() + yaml.dump(d, sys.stdout) + print('='*15) + yaml = YAML() + yaml.compact(seq_seq=False, seq_map=False) + yaml.dump(d, sys.stdout) + + +giving:: + + - b: 2 + - - 3 + - 4 + =============== + - + b: 2 + - + - 3 + - 4 + + +------ + +The following program uses three dumps on the same data, resulting in a stream with +three documents:: + + import sys + from ruyaml import YAML + + data = {1: {1: [{1: 1, 2: 2}, {1: 1, 2: 2}], 2: 2}, 2: 42} + + yaml = YAML() + yaml.explicit_start = True + yaml.dump(data, sys.stdout) + yaml.indent(sequence=4, offset=2) + yaml.dump(data, sys.stdout) + + + def sequence_indent_four(s): + # this will fail on directly nested lists: {1: [[2, 3], 4]} + levels = [] + ret_val = '' + for line in s.splitlines(True): + ls = line.lstrip() + indent = len(line) - len(ls) + if ls.startswith('- '): + if not levels or indent > levels[-1]: + levels.append(indent) + elif levels: + if indent < levels[-1]: + levels = levels[:-1] + # same -> do nothing + else: + if levels: + if indent <= levels[-1]: + while levels and indent <= levels[-1]: + levels = levels[:-1] + ret_val += ' ' * len(levels) + line
+ return ret_val + + yaml = YAML() + yaml.explicit_start = True + yaml.dump(data, sys.stdout, transform=sequence_indent_four) + +gives as output:: + + --- + 1: + 1: + - 1: 1 + 2: 2 + - 1: 1 + 2: 2 + 2: 2 + 2: 42 + --- + 1: + 1: + - 1: 1 + 2: 2 + - 1: 1 + 2: 2 + 2: 2 + 2: 42 + --- + 1: + 1: + - 1: 1 + 2: 2 + - 1: 1 + 2: 2 + 2: 2 + 2: 42 + + +The transform example, in the last document, was inspired by a +`question posted by *nowox* +`_ on StackOverflow. + +----- + +Output of ``dump()`` as a string +++++++++++++++++++++++++++++++++ + +The single most abused "feature" of the old API is not providing the (second) +stream parameter to one of the ``dump()`` variants, in order to get a monolithic string +representation of the stream back. + +Apart from being memory inefficient and slow, quite often people using this did not +realise that ``print(round_trip_dump(dict(a=1, b=2)))`` gets you an extra, +empty, line after ``b: 2``. + +The real question is why this functionality, which is seldom really +necessary, is available in the old API (and in PyYAML) in the first place. One +explanation you get by looking at what someone would need to do to make this +available if it weren't there already. Apart from subclassing the ``Serializer`` +and providing a new ``dump`` method, which would take ten or so lines, another +**hundred** lines, essentially the whole ``dumper.py`` file, would need to be +copied and to make use of this serializer. + +The fact is that one should normally be doing ``round_trip_dump(dict(a=1, b=2), +sys.stdout)`` and do away with 90% of the cases for returning the string, and +that all post-processing YAML, before writing to stream, can be handled by using +the ``transform=`` parameter of dump, being able to handle most of the rest.
But +it is also much easier in the new API to provide that YAML output as a string if +you really need to have it (or think you do):: + + import sys + from ruyaml import YAML + from io import StringIO + + class MyYAML(YAML): + def dump(self, data, stream=None, **kw): + inefficient = False + if stream is None: + inefficient = True + stream = StringIO() + YAML.dump(self, data, stream, **kw) + if inefficient: + return stream.getvalue() + + yaml = MyYAML() # or typ='safe'/'unsafe' etc + +with about one tenth of the lines needed for the old interface, you can once more do:: + + print(yaml.dump(dict(a=1, b=2))) + +instead of:: + + yaml.dump((dict(a=1, b=2)), sys.stdout) + print() # or sys.stdout.write('\n') diff --git a/_doc/index.rst b/_doc/index.rst new file mode 100644 index 0000000..0324eff --- /dev/null +++ b/_doc/index.rst @@ -0,0 +1,27 @@ + +*********** +ruyaml +*********** + +`Github `_ | +`PyPI `_ + + +Contents: + +.. toctree:: + :maxdepth: 2 + + overview + install + basicuse + dumpcls + detail + example + api + pyyaml + contributing + upmerge + +.. image:: https://readthedocs.org/projects/ruyaml/badge/?version=stable + :target: https://ruyaml.readthedocs.org/en/stable diff --git a/_doc/index.ryd b/_doc/index.ryd new file mode 100644 index 0000000..0ed9070 --- /dev/null +++ b/_doc/index.ryd @@ -0,0 +1,56 @@ +version: 0.1 +output: rst +fix_inline_single_backquotes: true +pdf: false +--- !comment | +Sections, subsections, etc. in .ryd files + # with overline, for parts + * with overline, for chapters + =, for sections + +, for subsections + ^, for subsubsections + ", for paragraphs + + don't use - or . as --- or ... interfere with ryd +--- | + +*********** +ruamel.yaml +*********** + +`SourceForge `_ | +`PyPI `_ + + +Contents: + +.. toctree:: + :maxdepth: 2 + + overview + install + basicuse + dumpcls + detail + example + api + pyyaml + contributing + +..
image:: https://readthedocs.org/projects/yaml/badge/?version=stable + :target: https://yaml.readthedocs.org/en/stable + +.. image:: https://bestpractices.coreinfrastructure.org/projects/1128/badge + :target: https://bestpractices.coreinfrastructure.org/projects/1128 + +.. image:: https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree/_doc/_static/license.svg?format=raw + :target: https://opensource.org/licenses/MIT + +.. image:: https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree/_doc/_static/pypi.svg?format=raw + :target: https://pypi.org/project/ruamel.yaml/ + +.. image:: https://sourceforge.net/p/oitnb/code/ci/default/tree/_doc/_static/oitnb.svg?format=raw + :target: https://pypi.org/project/oitnb/ + +.. image:: http://www.mypy-lang.org/static/mypy_badge.svg + :target: http://mypy-lang.org/ diff --git a/_doc/install.rst b/_doc/install.rst new file mode 100644 index 0000000..84fe871 --- /dev/null +++ b/_doc/install.rst @@ -0,0 +1,53 @@ +********** +Installing +********** + +Make sure you have a recent version of ``pip`` and ``setuptools`` +installed. The later needs environment marker support +(``setuptools>=20.6.8``) and that is e.g. bundled with Python 3.4.6 but +not with 3.4.4. It is probably best to do:: + + pip install -U pip setuptools wheel + +in your environment (``virtualenv``, (Docker) container, etc) before +installing ``ruyaml``. 
+ +``ruyaml`` itself should be installed from PyPI_ using:: + + pip install ruyaml + +If you want to process jinja2/YAML templates (which are not valid YAML +with the default jinja2 markers), do ``pip install +ruyaml[jinja2]`` (you might need to quote the last argument +because of the ``[]``) + + +There also is a commandline utility ``yaml`` available after installing:: + + pip install ruyaml.cmd + +that allows for round-trip testing/re-indenting and conversion of YAML +files (JSON,INI,HTML tables) + +Optional requirements ++++++++++++++++++++++ + +If you have the header files for your Python executables installed +then you can use the (non-roundtrip), but faster, C loader and emitter. + +On Debian systems you should use:: + + sudo apt-get install python3-dev + +you can leave out ``python3-dev`` if you don't use python3 + +For CentOS (7) based systems you should do:: + + sudo yum install python-devel + +.. _tox: https://pypi.python.org/pypi/tox +.. _py.test: http://pytest.org/latest/ +.. _YAML 1.1: http://www.yaml.org/spec/1.1/spec.html +.. _YAML 1.2: http://www.yaml.org/spec/1.2/spec.html +.. _PyPI: https://pypi.python.org/pypi +.. _ruyaml: https://pypi.python.org/pypi/ruyaml diff --git a/_doc/overview.rst b/_doc/overview.rst new file mode 100644 index 0000000..7faca68 --- /dev/null +++ b/_doc/overview.rst @@ -0,0 +1,48 @@ +******** +Overview +******** + +``ruyaml`` is a YAML 1.2 loader/dumper package for Python. It is a +derivative of Kirill Simonov's `PyYAML 3.11 +`_. + +``ruyaml`` supports `YAML 1.2`_ and has round-trip loaders and dumpers. + +- comments +- block style and key ordering are kept, so you can diff the round-tripped + source +- flow style sequences ( 'a: b, c, d') (based on request and test by + Anthony Sottile) +- anchor names that are hand-crafted (i.e.
not of the form ``idNNN``) +- `merges `_ in dictionaries are preserved + +This preservation is normally not broken unless you severely alter +the structure of a component (delete a key in a dict, remove list entries). +Reassigning values or replacing list items, etc., is fine. + +For the specific 1.2 differences see :ref:`yaml-1-2-support` + +Although individual indentation of lines is not preserved, you can specify +separate indentation levels for mappings and sequences (counting for sequences +does **not** include the dash for a sequence element) and specific offset of +block sequence dashes within that indentation. + + +Although ``ruyaml`` still allows most of the PyYAML way of doing +things, adding features required a different API than the transient +nature of PyYAML's ``Loader`` and ``Dumper``. Starting with +``ruyaml`` version 0.15.0 this new API gets introduced. Old ways +that get in the way will be removed, after first generating warnings +on use, then generating an error. In general a warning in version 0.N.x will become an +error in 0.N+1.0 + + +Many of the bugs filed against PyYAML, but that were never +acted upon, have been fixed in ``ruyaml`` + +.. _tox: https://pypi.python.org/pypi/tox +.. _py.test: http://pytest.org/latest/ +.. _YAML 1.1: http://www.yaml.org/spec/1.1/spec.html +.. _YAML 1.2: http://www.yaml.org/spec/1.2/spec.html +.. _PyPI: https://pypi.python.org/pypi +.. _ruyaml: https://pypi.python.org/pypi/ruyaml diff --git a/_doc/pyyaml.rst b/_doc/pyyaml.rst new file mode 100644 index 0000000..084bd70 --- /dev/null +++ b/_doc/pyyaml.rst @@ -0,0 +1,80 @@ +*********************** +Differences with PyYAML +*********************** + +.. parsed-literal:: + + *If I have seen further, it is by standing on the shoulders of giants*. + Isaac Newton (1676) + + + +``ruyaml`` is a derivative of Kirill Simonov's `PyYAML 3.11 +`_ and would not exist without that +excellent base to start from.
+ +The following is a summary of the major differences with PyYAML 3.11 + +.. _yaml-1-2-support: + +Defaulting to YAML 1.2 support +++++++++++++++++++++++++++++++ + +PyYAML supports the `YAML 1.1`_ standard, ``ruyaml`` supports +`YAML 1.2`_ as released in 2009. + +- YAML 1.2 dropped support for several features: unquoted ``Yes``, + ``No``, ``On``, ``Off`` +- YAML 1.2 no longer accepts strings that start with a ``0`` and solely + consist of number characters as octal, you need to specify such strings with + ``0o[0-7]+`` (zero + lower-case o for octal + one or more octal characters). +- YAML 1.2 no longer supports `sexagesimals + `_, so the string scalar + ``12:34:56`` doesn't need quoting. +- ``\/`` escape for JSON compatibility +- correct parsing of floating point scalars with exponentials + +unless the YAML document is loaded with an explicit ``version==1.1`` or +the document starts with:: + + %YAML 1.1 + +, ``ruyaml`` will load the document as version 1.2. + + +Python Compatibility +++++++++++++++++++++ + +``ruyaml`` requires Python 3.6 or later. + +Fixes ++++++ + +- ``ruyaml`` follows the ``indent`` keyword argument on scalars + when dumping. +- ``ruyaml`` allows ``:`` in plain scalars, as long as these are not + followed by a space (as per the specification) + + +Testing ++++++++ + +``ruyaml`` is tested using `tox`_ and `py.test`_. In addition to +new tests, the original PyYAML +test framework is called from within ``tox`` runs. + +Before versions are pushed to PyPI, ``tox`` is invoked, and has to pass, on all +supported Python versions, on PyPI as well as flake8/pep8 + +API ++++ + +Starting with 0.15 the API for using ``ruyaml`` has diverged allowing +easier addition of new features. + +.. _tox: https://pypi.python.org/pypi/tox +.. _py.test: http://pytest.org/latest/ +.. _YAML 1.1: http://www.yaml.org/spec/1.1/spec.html +.. _YAML 1.2: http://www.yaml.org/spec/1.2/spec.html +.. _PyPI: https://pypi.python.org/pypi +..
_ruyaml: https://pypi.python.org/pypi/ruyaml diff --git a/_doc/upmerge.rst b/_doc/upmerge.rst new file mode 100644 index 0000000..1f93bf9 --- /dev/null +++ b/_doc/upmerge.rst @@ -0,0 +1,97 @@ +************** +Upstream Merge +************** + +The process to merge ``ruamel.yaml``'s Mercurial repository to ours is +non-trivial due to non-unique Mercurial-to-git imports and squash merges. + +Preparation +=========== + +We create a git import of the Upstream repository. Then we add a +pseudo-merge node to it which represents our version of the code +at the point where the last merge happened. The commit we want is most +likely named "Upstream 0.xx.yy". + +So, first we get a git copy of an HG clone of the ``ruamel.yaml`` +repository:: + + # install Mercurial (depends on your distribution) + + cd /your/src + mkdir -p ruyaml/git + cd ruyaml/git; git init + cd ../ + hg clone http://hg.code.sf.net/p/ruamel-yaml/code hg + +Next we prepare our repository for merging. We need a ``hg-fast-export`` +script:: + + cd .. + git clone git@github.com:frej/fast-export.git + +We use that script to setup our git copy:: + + cd ../git + ../fast-export/hg-fast-export.sh -r ../hg --ignore-unnamed-heads + +Now let's create a third repository for the actual work:: + + cd ../ + git clone git@github.com:pycontribs/ruyaml.git repo + cd repo + git remote add ruamel ../git + git fetch ruamel + +Create a branch for merging:: + + git checkout -b merge main + +This concludes setting things up.
+ +Incremental merge +================= + +First, let's pull the remote changes (if any):: + + cd /your/src/ruyaml/hg + hg pull + cd ../git + ../fast-export/hg-fast-export.sh -r ../hg --ignore-unnamed-heads + cd ../repo + git fetch --all + git checkout merge + +Next, we need a pseudo-merge that declares "we have merged all of Upstream +up to *THAT* into *THIS*", where *THIS* is the latest Merge commit in our +repository (typically named "Upstream 0.xx.yy") and *THAT* is the +corresponding commit in the Ruamel tree (it should be tagged 0.xx.yy):: + + git log --date-order --all --oneline + git reset --hard THIS + git merge -s ours THAT + +Now we'll "merge" the current Upstream sources:: + + git merge --squash ruamel/main + +This will create a heap of conflicts, but no commit yet. + +.. note:: + + The reason we do a squash-merge here is that otherwise git will + un-helpfully upload the complete history of ``ruamel.yaml`` to GitHub. + It's already there, of course, but due to the diverging git hashes that + doesn't help. + +The next step, obviously, is to fix the conflicts. (There will be a bunch.) +If git complains about a deleted ``__init__.py``, the solution is to ``git +rm -f __init__.py``. + +Then, commit your changes:: + + git commit -a -m "Merge Upstream 0.xx.yz" + git push -f origin merge + +Now check github. If everything is OK, congratulations, otherwise fix and +push (no need to repeat the ``-f``). 
diff --git a/_test/__init__.py b/_test/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/_test/data/a-nasty-libyaml-bug.loader-error b/_test/data/a-nasty-libyaml-bug.loader-error new file mode 100644 index 0000000..f97d49f --- /dev/null +++ b/_test/data/a-nasty-libyaml-bug.loader-error @@ -0,0 +1 @@ +[ [ \ No newline at end of file diff --git a/_test/data/aliases-cdumper-bug.code b/_test/data/aliases-cdumper-bug.code new file mode 100644 index 0000000..0168441 --- /dev/null +++ b/_test/data/aliases-cdumper-bug.code @@ -0,0 +1 @@ +[ today, today ] diff --git a/_test/data/aliases.events b/_test/data/aliases.events new file mode 100644 index 0000000..9139b51 --- /dev/null +++ b/_test/data/aliases.events @@ -0,0 +1,8 @@ +- !StreamStart +- !DocumentStart +- !SequenceStart +- !Scalar { anchor: 'myanchor', tag: '!mytag', value: 'data' } +- !Alias { anchor: 'myanchor' } +- !SequenceEnd +- !DocumentEnd +- !StreamEnd diff --git a/_test/data/bool.data b/_test/data/bool.data new file mode 100644 index 0000000..ff99e77 --- /dev/null +++ b/_test/data/bool.data @@ -0,0 +1,18 @@ +- yes +- Yes +- YES +- no +- No +- NO +- true +- True +- TRUE +- false +- False +- FALSE +- on +- On +- ON +- off +- Off +- OFF diff --git a/_test/data/bool.detect b/_test/data/bool.detect new file mode 100644 index 0000000..947ebbb --- /dev/null +++ b/_test/data/bool.detect @@ -0,0 +1 @@ +tag:yaml.org,2002:bool diff --git a/_test/data/colon-in-flow-context.loader-error b/_test/data/colon-in-flow-context.loader-error new file mode 100644 index 0000000..13d5087 --- /dev/null +++ b/_test/data/colon-in-flow-context.loader-error @@ -0,0 +1 @@ +{ foo:bar } diff --git a/_test/data/comment_no_eol.data b/_test/data/comment_no_eol.data new file mode 100644 index 0000000..f7b15f6 --- /dev/null +++ b/_test/data/comment_no_eol.data @@ -0,0 +1 @@ +european: 10 # abc \ No newline at end of file diff --git a/_test/data/composite_key.code b/_test/data/composite_key.code new file mode 100644 index 
0000000..627b049 --- /dev/null +++ b/_test/data/composite_key.code @@ -0,0 +1 @@ +{('foo', 'bar'): 'baz'} diff --git a/_test/data/composite_key.data b/_test/data/composite_key.data new file mode 100644 index 0000000..d748e37 --- /dev/null +++ b/_test/data/composite_key.data @@ -0,0 +1,4 @@ +--- +? - foo + - bar +: baz diff --git a/_test/data/construct-binary-py3.code b/_test/data/construct-binary-py3.code new file mode 100644 index 0000000..30bfc3f --- /dev/null +++ b/_test/data/construct-binary-py3.code @@ -0,0 +1,7 @@ +{ + "canonical": + b"GIF89a\x0c\x00\x0c\x00\x84\x00\x00\xff\xff\xf7\xf5\xf5\xee\xe9\xe9\xe5fff\x00\x00\x00\xe7\xe7\xe7^^^\xf3\xf3\xed\x8e\x8e\x8e\xe0\xe0\xe0\x9f\x9f\x9f\x93\x93\x93\xa7\xa7\xa7\x9e\x9e\x9eiiiccc\xa3\xa3\xa3\x84\x84\x84\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9!\xfe\x0eMade with GIMP\x00,\x00\x00\x00\x00\x0c\x00\x0c\x00\x00\x05, \x8e\x810\x9e\xe3@\x14\xe8i\x10\xc4\xd1\x8a\x08\x1c\xcf\x80M$z\xef\xff0\x85p\xb8\xb01f\r\x1b\xce\x01\xc3\x01\x1e\x10' \x82\n\x01\x00;", + "generic": + b"GIF89a\x0c\x00\x0c\x00\x84\x00\x00\xff\xff\xf7\xf5\xf5\xee\xe9\xe9\xe5fff\x00\x00\x00\xe7\xe7\xe7^^^\xf3\xf3\xed\x8e\x8e\x8e\xe0\xe0\xe0\x9f\x9f\x9f\x93\x93\x93\xa7\xa7\xa7\x9e\x9e\x9eiiiccc\xa3\xa3\xa3\x84\x84\x84\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9\xff\xfe\xf9!\xfe\x0eMade with GIMP\x00,\x00\x00\x00\x00\x0c\x00\x0c\x00\x00\x05, \x8e\x810\x9e\xe3@\x14\xe8i\x10\xc4\xd1\x8a\x08\x1c\xcf\x80M$z\xef\xff0\x85p\xb8\xb01f\r\x1b\xce\x01\xc3\x01\x1e\x10' \x82\n\x01\x00;", + "description": "The binary value above is a tiny arrow encoded as a gif image.", +} diff --git a/_test/data/construct-binary-py3.data b/_test/data/construct-binary-py3.data new file mode 100644 index 0000000..dcdb16f --- /dev/null +++ 
b/_test/data/construct-binary-py3.data @@ -0,0 +1,12 @@ +canonical: !!binary "\ + R0lGODlhDAAMAIQAAP//9/X17unp5WZmZgAAAOfn515eXvPz7Y6OjuDg4J+fn5\ + OTk6enp56enmlpaWNjY6Ojo4SEhP/++f/++f/++f/++f/++f/++f/++f/++f/+\ + +f/++f/++f/++f/++f/++SH+Dk1hZGUgd2l0aCBHSU1QACwAAAAADAAMAAAFLC\ + AgjoEwnuNAFOhpEMTRiggcz4BNJHrv/zCFcLiwMWYNG84BwwEeECcgggoBADs=" +generic: !!binary | + R0lGODlhDAAMAIQAAP//9/X17unp5WZmZgAAAOfn515eXvPz7Y6OjuDg4J+fn5 + OTk6enp56enmlpaWNjY6Ojo4SEhP/++f/++f/++f/++f/++f/++f/++f/++f/+ + +f/++f/++f/++f/++f/++SH+Dk1hZGUgd2l0aCBHSU1QACwAAAAADAAMAAAFLC + AgjoEwnuNAFOhpEMTRiggcz4BNJHrv/zCFcLiwMWYNG84BwwEeECcgggoBADs= +description: + The binary value above is a tiny arrow encoded as a gif image. diff --git a/_test/data/construct-bool.code b/_test/data/construct-bool.code new file mode 100644 index 0000000..3d02580 --- /dev/null +++ b/_test/data/construct-bool.code @@ -0,0 +1,7 @@ +{ + "canonical": True, + "answer": False, + "logical": True, + "option": True, + "but": { "y": "is a string", "n": "is a string" }, +} diff --git a/_test/data/construct-bool.data b/_test/data/construct-bool.data new file mode 100644 index 0000000..36d6519 --- /dev/null +++ b/_test/data/construct-bool.data @@ -0,0 +1,9 @@ +canonical: yes +answer: NO +logical: True +option: on + + +but: + y: is a string + n: is a string diff --git a/_test/data/construct-custom.code b/_test/data/construct-custom.code new file mode 100644 index 0000000..2d5f063 --- /dev/null +++ b/_test/data/construct-custom.code @@ -0,0 +1,10 @@ +[ + MyTestClass1(x=1), + MyTestClass1(x=1, y=2, z=3), + MyTestClass2(x=10), + MyTestClass2(x=10, y=20, z=30), + MyTestClass3(x=1), + MyTestClass3(x=1, y=2, z=3), + MyTestClass3(x=1, y=2, z=3), + YAMLObject1(my_parameter='foo', my_another_parameter=[1,2,3]) +] diff --git a/_test/data/construct-custom.data b/_test/data/construct-custom.data new file mode 100644 index 0000000..9db0f64 --- /dev/null +++ b/_test/data/construct-custom.data @@ -0,0 +1,26 @@ +--- +- !tag1 + x: 1 +- !tag1 + 
x: 1 + 'y': 2 + z: 3 +- !tag2 + 10 +- !tag2 + =: 10 + 'y': 20 + z: 30 +- !tag3 + x: 1 +- !tag3 + x: 1 + 'y': 2 + z: 3 +- !tag3 + =: 1 + 'y': 2 + z: 3 +- !foo + my-parameter: foo + my-another-parameter: [1,2,3] diff --git a/_test/data/construct-float.code b/_test/data/construct-float.code new file mode 100644 index 0000000..8493bf2 --- /dev/null +++ b/_test/data/construct-float.code @@ -0,0 +1,8 @@ +{ + "canonical": 685230.15, + "exponential": 685230.15, + "fixed": 685230.15, + "sexagesimal": 685230.15, + "negative infinity": -1e300000, + "not a number": 1e300000/1e300000, +} diff --git a/_test/data/construct-float.data b/_test/data/construct-float.data new file mode 100644 index 0000000..b662c62 --- /dev/null +++ b/_test/data/construct-float.data @@ -0,0 +1,6 @@ +canonical: 6.8523015e+5 +exponential: 685.230_15e+03 +fixed: 685_230.15 +sexagesimal: 190:20:30.15 +negative infinity: -.inf +not a number: .NaN diff --git a/_test/data/construct-int.code b/_test/data/construct-int.code new file mode 100644 index 0000000..1058f7b --- /dev/null +++ b/_test/data/construct-int.code @@ -0,0 +1,8 @@ +{ + "canonical": 685230, + "decimal": 685230, + "octal": 685230, + "hexadecimal": 685230, + "binary": 685230, + "sexagesimal": 685230, +} diff --git a/_test/data/construct-int.data b/_test/data/construct-int.data new file mode 100644 index 0000000..852c314 --- /dev/null +++ b/_test/data/construct-int.data @@ -0,0 +1,6 @@ +canonical: 685230 +decimal: +685_230 +octal: 02472256 +hexadecimal: 0x_0A_74_AE +binary: 0b1010_0111_0100_1010_1110 +sexagesimal: 190:20:30 diff --git a/_test/data/construct-map.code b/_test/data/construct-map.code new file mode 100644 index 0000000..736ba48 --- /dev/null +++ b/_test/data/construct-map.code @@ -0,0 +1,6 @@ +{ + "Block style": + { "Clark" : "Evans", "Brian" : "Ingerson", "Oren" : "Ben-Kiki" }, + "Flow style": + { "Clark" : "Evans", "Brian" : "Ingerson", "Oren" : "Ben-Kiki" }, +} diff --git a/_test/data/construct-map.data 
b/_test/data/construct-map.data new file mode 100644 index 0000000..022446d --- /dev/null +++ b/_test/data/construct-map.data @@ -0,0 +1,6 @@ +# Unordered set of key: value pairs. +Block style: !!map + Clark : Evans + Brian : Ingerson + Oren : Ben-Kiki +Flow style: !!map { Clark: Evans, Brian: Ingerson, Oren: Ben-Kiki } diff --git a/_test/data/construct-merge.code b/_test/data/construct-merge.code new file mode 100644 index 0000000..6cd419d --- /dev/null +++ b/_test/data/construct-merge.code @@ -0,0 +1,10 @@ +[ + { "x": 1, "y": 2 }, + { "x": 0, "y": 2 }, + { "r": 10 }, + { "r": 1 }, + { "x": 1, "y": 2, "r": 10, "label": "center/big" }, + { "x": 1, "y": 2, "r": 10, "label": "center/big" }, + { "x": 1, "y": 2, "r": 10, "label": "center/big" }, + { "x": 1, "y": 2, "r": 10, "label": "center/big" }, +] diff --git a/_test/data/construct-merge.data b/_test/data/construct-merge.data new file mode 100644 index 0000000..3fdb2e2 --- /dev/null +++ b/_test/data/construct-merge.data @@ -0,0 +1,27 @@ +--- +- &CENTER { x: 1, 'y': 2 } +- &LEFT { x: 0, 'y': 2 } +- &BIG { r: 10 } +- &SMALL { r: 1 } + +# All the following maps are equal: + +- # Explicit keys + x: 1 + 'y': 2 + r: 10 + label: center/big + +- # Merge one map + << : *CENTER + r: 10 + label: center/big + +- # Merge multiple maps + << : [ *CENTER, *BIG ] + label: center/big + +- # Override + << : [ *BIG, *LEFT, *SMALL ] + x: 1 + label: center/big diff --git a/_test/data/construct-null.code b/_test/data/construct-null.code new file mode 100644 index 0000000..a895eaa --- /dev/null +++ b/_test/data/construct-null.code @@ -0,0 +1,13 @@ +[ + None, + { "empty": None, "canonical": None, "english": None, None: "null key" }, + { + "sparse": [ + None, + "2nd entry", + None, + "4th entry", + None, + ], + }, +] diff --git a/_test/data/construct-null.data b/_test/data/construct-null.data new file mode 100644 index 0000000..9ad0344 --- /dev/null +++ b/_test/data/construct-null.data @@ -0,0 +1,18 @@ +# A document may be null. 
+--- +--- +# This mapping has four keys, +# one has a value. +empty: +canonical: ~ +english: null +~: null key +--- +# This sequence has five +# entries, two have values. +sparse: + - ~ + - 2nd entry + - + - 4th entry + - Null diff --git a/_test/data/construct-omap.code b/_test/data/construct-omap.code new file mode 100644 index 0000000..33a1574 --- /dev/null +++ b/_test/data/construct-omap.code @@ -0,0 +1,8 @@ +{ + "Bestiary": ordereddict([ + ("aardvark", "African pig-like ant eater. Ugly."), + ("anteater", "South-American ant eater. Two species."), + ("anaconda", "South-American constrictor snake. Scaly."), + ]), + "Numbers": ordereddict([ ("one", 4), ("one", 1), ("two", 2), ("three", 3) ]), +} diff --git a/_test/data/construct-omap.data b/_test/data/construct-omap.data new file mode 100644 index 0000000..4fa0f45 --- /dev/null +++ b/_test/data/construct-omap.data @@ -0,0 +1,8 @@ +# Explicitly typed ordered map (dictionary). +Bestiary: !!omap + - aardvark: African pig-like ant eater. Ugly. + - anteater: South-American ant eater. Two species. + - anaconda: South-American constrictor snake. Scaly. + # Etc. +# Flow style +Numbers: !!omap [ one: 1, two: 2, three : 3 ] diff --git a/_test/data/construct-pairs.code b/_test/data/construct-pairs.code new file mode 100644 index 0000000..64f86ee --- /dev/null +++ b/_test/data/construct-pairs.code @@ -0,0 +1,9 @@ +{ + "Block tasks": [ + ("meeting", "with team."), + ("meeting", "with boss."), + ("break", "lunch."), + ("meeting", "with client."), + ], + "Flow tasks": [ ("meeting", "with team"), ("meeting", "with boss") ], +} diff --git a/_test/data/construct-pairs.data b/_test/data/construct-pairs.data new file mode 100644 index 0000000..05f55b9 --- /dev/null +++ b/_test/data/construct-pairs.data @@ -0,0 +1,7 @@ +# Explicitly typed pairs. +Block tasks: !!pairs + - meeting: with team. + - meeting: with boss. + - break: lunch. + - meeting: with client. 
+Flow tasks: !!pairs [ meeting: with team, meeting: with boss ] diff --git a/_test/data/construct-python-bool.code b/_test/data/construct-python-bool.code new file mode 100644 index 0000000..170da01 --- /dev/null +++ b/_test/data/construct-python-bool.code @@ -0,0 +1 @@ +[ True, False ] diff --git a/_test/data/construct-python-bool.data b/_test/data/construct-python-bool.data new file mode 100644 index 0000000..0068869 --- /dev/null +++ b/_test/data/construct-python-bool.data @@ -0,0 +1 @@ +[ !!python/bool True, !!python/bool False ] diff --git a/_test/data/construct-python-bytes-py3.code b/_test/data/construct-python-bytes-py3.code new file mode 100644 index 0000000..b9051d8 --- /dev/null +++ b/_test/data/construct-python-bytes-py3.code @@ -0,0 +1 @@ +b'some binary data' diff --git a/_test/data/construct-python-bytes-py3.data b/_test/data/construct-python-bytes-py3.data new file mode 100644 index 0000000..9528725 --- /dev/null +++ b/_test/data/construct-python-bytes-py3.data @@ -0,0 +1 @@ +--- !!python/bytes 'c29tZSBiaW5hcnkgZGF0YQ==' diff --git a/_test/data/construct-python-complex.code b/_test/data/construct-python-complex.code new file mode 100644 index 0000000..e582dff --- /dev/null +++ b/_test/data/construct-python-complex.code @@ -0,0 +1 @@ +[0.5+0j, 0.5+0.5j, 0.5j, -0.5+0.5j, -0.5+0j, -0.5-0.5j, -0.5j, 0.5-0.5j] diff --git a/_test/data/construct-python-complex.data b/_test/data/construct-python-complex.data new file mode 100644 index 0000000..17ebad4 --- /dev/null +++ b/_test/data/construct-python-complex.data @@ -0,0 +1,8 @@ +- !!python/complex 0.5+0j +- !!python/complex 0.5+0.5j +- !!python/complex 0.5j +- !!python/complex -0.5+0.5j +- !!python/complex -0.5+0j +- !!python/complex -0.5-0.5j +- !!python/complex -0.5j +- !!python/complex 0.5-0.5j diff --git a/_test/data/construct-python-float.code b/_test/data/construct-python-float.code new file mode 100644 index 0000000..d5910a0 --- /dev/null +++ b/_test/data/construct-python-float.code @@ -0,0 +1 @@ 
+123.456 diff --git a/_test/data/construct-python-float.data b/_test/data/construct-python-float.data new file mode 100644 index 0000000..b460eb8 --- /dev/null +++ b/_test/data/construct-python-float.data @@ -0,0 +1 @@ +!!python/float 123.456 diff --git a/_test/data/construct-python-int.code b/_test/data/construct-python-int.code new file mode 100644 index 0000000..190a180 --- /dev/null +++ b/_test/data/construct-python-int.code @@ -0,0 +1 @@ +123 diff --git a/_test/data/construct-python-int.data b/_test/data/construct-python-int.data new file mode 100644 index 0000000..741d669 --- /dev/null +++ b/_test/data/construct-python-int.data @@ -0,0 +1 @@ +!!python/int 123 diff --git a/_test/data/construct-python-long-short-py3.code b/_test/data/construct-python-long-short-py3.code new file mode 100644 index 0000000..190a180 --- /dev/null +++ b/_test/data/construct-python-long-short-py3.code @@ -0,0 +1 @@ +123 diff --git a/_test/data/construct-python-long-short-py3.data b/_test/data/construct-python-long-short-py3.data new file mode 100644 index 0000000..4bd5dc2 --- /dev/null +++ b/_test/data/construct-python-long-short-py3.data @@ -0,0 +1 @@ +!!python/long 123 diff --git a/_test/data/construct-python-name-module.code b/_test/data/construct-python-name-module.code new file mode 100644 index 0000000..6f39148 --- /dev/null +++ b/_test/data/construct-python-name-module.code @@ -0,0 +1 @@ +[str, yaml.Loader, yaml.dump, abs, yaml.tokens] diff --git a/_test/data/construct-python-name-module.data b/_test/data/construct-python-name-module.data new file mode 100644 index 0000000..f0c9712 --- /dev/null +++ b/_test/data/construct-python-name-module.data @@ -0,0 +1,5 @@ +- !!python/name:str +- !!python/name:yaml.Loader +- !!python/name:yaml.dump +- !!python/name:abs +- !!python/module:yaml.tokens diff --git a/_test/data/construct-python-none.code b/_test/data/construct-python-none.code new file mode 100644 index 0000000..b0047fa --- /dev/null +++ 
b/_test/data/construct-python-none.code @@ -0,0 +1 @@ +None diff --git a/_test/data/construct-python-none.data b/_test/data/construct-python-none.data new file mode 100644 index 0000000..7907ec3 --- /dev/null +++ b/_test/data/construct-python-none.data @@ -0,0 +1 @@ +!!python/none diff --git a/_test/data/construct-python-object.code b/_test/data/construct-python-object.code new file mode 100644 index 0000000..7f1edf1 --- /dev/null +++ b/_test/data/construct-python-object.code @@ -0,0 +1,23 @@ +[ +AnObject(1, 'two', [3,3,3]), +AnInstance(1, 'two', [3,3,3]), + +AnObject(1, 'two', [3,3,3]), +AnInstance(1, 'two', [3,3,3]), + +AState(1, 'two', [3,3,3]), +ACustomState(1, 'two', [3,3,3]), + +InitArgs(1, 'two', [3,3,3]), +InitArgsWithState(1, 'two', [3,3,3]), + +NewArgs(1, 'two', [3,3,3]), +NewArgsWithState(1, 'two', [3,3,3]), + +Reduce(1, 'two', [3,3,3]), +ReduceWithState(1, 'two', [3,3,3]), + +MyInt(3), +MyList(3), +MyDict(3), +] diff --git a/_test/data/construct-python-object.data b/_test/data/construct-python-object.data new file mode 100644 index 0000000..bce8b2e --- /dev/null +++ b/_test/data/construct-python-object.data @@ -0,0 +1,21 @@ +- !!python/object:test_constructor.AnObject { foo: 1, bar: two, baz: [3,3,3] } +- !!python/object:test_constructor.AnInstance { foo: 1, bar: two, baz: [3,3,3] } + +- !!python/object/new:test_constructor.AnObject { args: [1, two], kwds: {baz: [3,3,3]} } +- !!python/object/apply:test_constructor.AnInstance { args: [1, two], kwds: {baz: [3,3,3]} } + +- !!python/object:test_constructor.AState { _foo: 1, _bar: two, _baz: [3,3,3] } +- !!python/object/new:test_constructor.ACustomState { state: !!python/tuple [1, two, [3,3,3]] } + +- !!python/object/new:test_constructor.InitArgs [1, two, [3,3,3]] +- !!python/object/new:test_constructor.InitArgsWithState { args: [1, two], state: [3,3,3] } + +- !!python/object/new:test_constructor.NewArgs [1, two, [3,3,3]] +- !!python/object/new:test_constructor.NewArgsWithState { args: [1, two], state: 
[3,3,3] } + +- !!python/object/apply:test_constructor.Reduce [1, two, [3,3,3]] +- !!python/object/apply:test_constructor.ReduceWithState { args: [1, two], state: [3,3,3] } + +- !!python/object/new:test_constructor.MyInt [3] +- !!python/object/new:test_constructor.MyList { listitems: [~, ~, ~] } +- !!python/object/new:test_constructor.MyDict { dictitems: {0, 1, 2} } diff --git a/_test/data/construct-python-str-ascii.code b/_test/data/construct-python-str-ascii.code new file mode 100644 index 0000000..d9d62f6 --- /dev/null +++ b/_test/data/construct-python-str-ascii.code @@ -0,0 +1 @@ +"ascii string" diff --git a/_test/data/construct-python-str-ascii.data b/_test/data/construct-python-str-ascii.data new file mode 100644 index 0000000..a83349e --- /dev/null +++ b/_test/data/construct-python-str-ascii.data @@ -0,0 +1 @@ +--- !!python/str "ascii string" diff --git a/_test/data/construct-python-str-utf8-py2.code b/_test/data/construct-python-str-utf8-py2.code new file mode 100644 index 0000000..6ca7d8f --- /dev/null +++ b/_test/data/construct-python-str-utf8-py2.code @@ -0,0 +1 @@ +'\u042d\u0442\u043e \u0443\u043d\u0438\u043a\u043e\u0434\u043d\u0430\u044f \u0441\u0442\u0440\u043e\u043a\u0430'.encode('utf-8') diff --git a/_test/data/construct-python-str-utf8-py3.code b/_test/data/construct-python-str-utf8-py3.code new file mode 100644 index 0000000..9f66032 --- /dev/null +++ b/_test/data/construct-python-str-utf8-py3.code @@ -0,0 +1 @@ +'\u042d\u0442\u043e \u0443\u043d\u0438\u043a\u043e\u0434\u043d\u0430\u044f \u0441\u0442\u0440\u043e\u043a\u0430' diff --git a/_test/data/construct-python-str-utf8-py3.data b/_test/data/construct-python-str-utf8-py3.data new file mode 100644 index 0000000..9ef2c72 --- /dev/null +++ b/_test/data/construct-python-str-utf8-py3.data @@ -0,0 +1 @@ +--- !!python/str "Это ÑƒÐ½Ð¸ÐºÐ¾Ð´Ð½Ð°Ñ Ñтрока" diff --git a/_test/data/construct-python-tuple-list-dict.code b/_test/data/construct-python-tuple-list-dict.code new file mode 100644 index 
0000000..20ced98 --- /dev/null +++ b/_test/data/construct-python-tuple-list-dict.code @@ -0,0 +1,6 @@ +[ + [1, 2, 3, 4], + (1, 2, 3, 4), + {1: 2, 3: 4}, + {(0,0): 0, (0,1): 1, (1,0): 1, (1,1): 0}, +] diff --git a/_test/data/construct-python-tuple-list-dict.data b/_test/data/construct-python-tuple-list-dict.data new file mode 100644 index 0000000..c56159b --- /dev/null +++ b/_test/data/construct-python-tuple-list-dict.data @@ -0,0 +1,8 @@ +- !!python/list [1, 2, 3, 4] +- !!python/tuple [1, 2, 3, 4] +- !!python/dict {1: 2, 3: 4} +- !!python/dict + !!python/tuple [0,0]: 0 + !!python/tuple [0,1]: 1 + !!python/tuple [1,0]: 1 + !!python/tuple [1,1]: 0 diff --git a/_test/data/construct-python-unicode-ascii-py3.code b/_test/data/construct-python-unicode-ascii-py3.code new file mode 100644 index 0000000..d9d62f6 --- /dev/null +++ b/_test/data/construct-python-unicode-ascii-py3.code @@ -0,0 +1 @@ +"ascii string" diff --git a/_test/data/construct-python-unicode-ascii-py3.data b/_test/data/construct-python-unicode-ascii-py3.data new file mode 100644 index 0000000..3a0647b --- /dev/null +++ b/_test/data/construct-python-unicode-ascii-py3.data @@ -0,0 +1 @@ +--- !!python/unicode "ascii string" diff --git a/_test/data/construct-python-unicode-utf8-py2.code b/_test/data/construct-python-unicode-utf8-py2.code new file mode 100644 index 0000000..9f66032 --- /dev/null +++ b/_test/data/construct-python-unicode-utf8-py2.code @@ -0,0 +1 @@ +'\u042d\u0442\u043e \u0443\u043d\u0438\u043a\u043e\u0434\u043d\u0430\u044f \u0441\u0442\u0440\u043e\u043a\u0430' diff --git a/_test/data/construct-python-unicode-utf8-py3.code b/_test/data/construct-python-unicode-utf8-py3.code new file mode 100644 index 0000000..9f66032 --- /dev/null +++ b/_test/data/construct-python-unicode-utf8-py3.code @@ -0,0 +1 @@ +'\u042d\u0442\u043e \u0443\u043d\u0438\u043a\u043e\u0434\u043d\u0430\u044f \u0441\u0442\u0440\u043e\u043a\u0430' diff --git a/_test/data/construct-python-unicode-utf8-py3.data 
b/_test/data/construct-python-unicode-utf8-py3.data new file mode 100644 index 0000000..5a980ea --- /dev/null +++ b/_test/data/construct-python-unicode-utf8-py3.data @@ -0,0 +1 @@ +--- !!python/unicode "Это ÑƒÐ½Ð¸ÐºÐ¾Ð´Ð½Ð°Ñ Ñтрока" diff --git a/_test/data/construct-seq.code b/_test/data/construct-seq.code new file mode 100644 index 0000000..0c90c05 --- /dev/null +++ b/_test/data/construct-seq.code @@ -0,0 +1,4 @@ +{ + "Block style": ["Mercury", "Venus", "Earth", "Mars", "Jupiter", "Saturn", "Uranus", "Neptune", "Pluto"], + "Flow style": ["Mercury", "Venus", "Earth", "Mars", "Jupiter", "Saturn", "Uranus", "Neptune", "Pluto"], +} diff --git a/_test/data/construct-seq.data b/_test/data/construct-seq.data new file mode 100644 index 0000000..bb92fd1 --- /dev/null +++ b/_test/data/construct-seq.data @@ -0,0 +1,15 @@ +# Ordered sequence of nodes +Block style: !!seq +- Mercury # Rotates - no light/dark sides. +- Venus # Deadliest. Aptly named. +- Earth # Mostly dirt. +- Mars # Seems empty. +- Jupiter # The king. +- Saturn # Pretty. +- Uranus # Where the sun hardly shines. +- Neptune # Boring. No rings. +- Pluto # You call this a planet? +Flow style: !!seq [ Mercury, Venus, Earth, Mars, # Rocks + Jupiter, Saturn, Uranus, Neptune, # Gas + Pluto ] # Overrated + diff --git a/_test/data/construct-set.code b/_test/data/construct-set.code new file mode 100644 index 0000000..aa090e8 --- /dev/null +++ b/_test/data/construct-set.code @@ -0,0 +1,4 @@ +{ + "baseball players": set(["Mark McGwire", "Sammy Sosa", "Ken Griffey"]), + "baseball teams": set(["Boston Red Sox", "Detroit Tigers", "New York Yankees"]), +} diff --git a/_test/data/construct-set.data b/_test/data/construct-set.data new file mode 100644 index 0000000..e05dc88 --- /dev/null +++ b/_test/data/construct-set.data @@ -0,0 +1,7 @@ +# Explicitly typed set. +baseball players: !!set + ? Mark McGwire + ? Sammy Sosa + ? 
Ken Griffey +# Flow style +baseball teams: !!set { Boston Red Sox, Detroit Tigers, New York Yankees } diff --git a/_test/data/construct-str-ascii.code b/_test/data/construct-str-ascii.code new file mode 100644 index 0000000..d9d62f6 --- /dev/null +++ b/_test/data/construct-str-ascii.code @@ -0,0 +1 @@ +"ascii string" diff --git a/_test/data/construct-str-ascii.data b/_test/data/construct-str-ascii.data new file mode 100644 index 0000000..0d93013 --- /dev/null +++ b/_test/data/construct-str-ascii.data @@ -0,0 +1 @@ +--- !!str "ascii string" diff --git a/_test/data/construct-str-utf8-py2.code b/_test/data/construct-str-utf8-py2.code new file mode 100644 index 0000000..9f66032 --- /dev/null +++ b/_test/data/construct-str-utf8-py2.code @@ -0,0 +1 @@ +'\u042d\u0442\u043e \u0443\u043d\u0438\u043a\u043e\u0434\u043d\u0430\u044f \u0441\u0442\u0440\u043e\u043a\u0430' diff --git a/_test/data/construct-str-utf8-py3.code b/_test/data/construct-str-utf8-py3.code new file mode 100644 index 0000000..9f66032 --- /dev/null +++ b/_test/data/construct-str-utf8-py3.code @@ -0,0 +1 @@ +'\u042d\u0442\u043e \u0443\u043d\u0438\u043a\u043e\u0434\u043d\u0430\u044f \u0441\u0442\u0440\u043e\u043a\u0430' diff --git a/_test/data/construct-str-utf8-py3.data b/_test/data/construct-str-utf8-py3.data new file mode 100644 index 0000000..e355f18 --- /dev/null +++ b/_test/data/construct-str-utf8-py3.data @@ -0,0 +1 @@ +--- !!str "Это ÑƒÐ½Ð¸ÐºÐ¾Ð´Ð½Ð°Ñ Ñтрока" diff --git a/_test/data/construct-str.code b/_test/data/construct-str.code new file mode 100644 index 0000000..8d57214 --- /dev/null +++ b/_test/data/construct-str.code @@ -0,0 +1 @@ +{ "string": "abcd" } diff --git a/_test/data/construct-str.data b/_test/data/construct-str.data new file mode 100644 index 0000000..606ac6b --- /dev/null +++ b/_test/data/construct-str.data @@ -0,0 +1 @@ +string: abcd diff --git a/_test/data/construct-timestamp.code b/_test/data/construct-timestamp.code new file mode 100644 index 0000000..ffc3b2f --- /dev/null +++ 
b/_test/data/construct-timestamp.code @@ -0,0 +1,7 @@ +{ + "canonical": datetime.datetime(2001, 12, 15, 2, 59, 43, 100000), + "valid iso8601": datetime.datetime(2001, 12, 15, 2, 59, 43, 100000), + "space separated": datetime.datetime(2001, 12, 15, 2, 59, 43, 100000), + "no time zone (Z)": datetime.datetime(2001, 12, 15, 2, 59, 43, 100000), + "date (00:00:00Z)": datetime.date(2002, 12, 14), +} diff --git a/_test/data/construct-timestamp.data b/_test/data/construct-timestamp.data new file mode 100644 index 0000000..c5f3840 --- /dev/null +++ b/_test/data/construct-timestamp.data @@ -0,0 +1,5 @@ +canonical: 2001-12-15T02:59:43.1Z +valid iso8601: 2001-12-14t21:59:43.10-05:00 +space separated: 2001-12-14 21:59:43.10 -5 +no time zone (Z): 2001-12-15 2:59:43.10 +date (00:00:00Z): 2002-12-14 diff --git a/_test/data/construct-value.code b/_test/data/construct-value.code new file mode 100644 index 0000000..f1f015e --- /dev/null +++ b/_test/data/construct-value.code @@ -0,0 +1,9 @@ +[ + { "link with": [ "library1.dll", "library2.dll" ] }, + { + "link with": [ + { "=": "library1.dll", "version": 1.2 }, + { "=": "library2.dll", "version": 2.3 }, + ], + }, +] diff --git a/_test/data/construct-value.data b/_test/data/construct-value.data new file mode 100644 index 0000000..3eb7919 --- /dev/null +++ b/_test/data/construct-value.data @@ -0,0 +1,10 @@ +--- # Old schema +link with: + - library1.dll + - library2.dll +--- # New schema +link with: + - = : library1.dll + version: 1.2 + - = : library2.dll + version: 2.3 diff --git a/_test/data/document-separator-in-quoted-scalar.loader-error b/_test/data/document-separator-in-quoted-scalar.loader-error new file mode 100644 index 0000000..9eeb0d6 --- /dev/null +++ b/_test/data/document-separator-in-quoted-scalar.loader-error @@ -0,0 +1,11 @@ +--- +"this --- is correct" +--- +"this +...is also +correct" +--- +"a quoted scalar +cannot contain +--- +document separators" diff --git a/_test/data/documents.events b/_test/data/documents.events new 
file mode 100644 index 0000000..775a51a --- /dev/null +++ b/_test/data/documents.events @@ -0,0 +1,11 @@ +- !StreamStart +- !DocumentStart { explicit: false } +- !Scalar { implicit: [true,false], value: 'data' } +- !DocumentEnd +- !DocumentStart +- !Scalar { implicit: [true,false] } +- !DocumentEnd +- !DocumentStart { version: [1,1], tags: { '!': '!foo', '!yaml!': 'tag:yaml.org,2002:', '!ugly!': '!!!!!!!' } } +- !Scalar { implicit: [true,false] } +- !DocumentEnd +- !StreamEnd diff --git a/_test/data/duplicate-anchor-1.loader-warning b/_test/data/duplicate-anchor-1.loader-warning new file mode 100644 index 0000000..906cf29 --- /dev/null +++ b/_test/data/duplicate-anchor-1.loader-warning @@ -0,0 +1,3 @@ +- &foo bar +- &bar bar +- &foo bar diff --git a/_test/data/duplicate-anchor-2.loader-warning b/_test/data/duplicate-anchor-2.loader-warning new file mode 100644 index 0000000..62b4389 --- /dev/null +++ b/_test/data/duplicate-anchor-2.loader-warning @@ -0,0 +1 @@ +&foo [1, 2, 3, &foo 4] diff --git a/_test/data/duplicate-merge-key.former-loader-error.code b/_test/data/duplicate-merge-key.former-loader-error.code new file mode 100644 index 0000000..6a757f3 --- /dev/null +++ b/_test/data/duplicate-merge-key.former-loader-error.code @@ -0,0 +1 @@ +{ 'x': 1, 'y': 2, 'foo': 'bar', 'z': 3, 't': 4 } diff --git a/_test/data/duplicate-tag-directive.loader-error b/_test/data/duplicate-tag-directive.loader-error new file mode 100644 index 0000000..50c81a0 --- /dev/null +++ b/_test/data/duplicate-tag-directive.loader-error @@ -0,0 +1,3 @@ +%TAG !foo! bar +%TAG !foo! 
baz +--- foo diff --git a/_test/data/duplicate-yaml-directive.loader-error b/_test/data/duplicate-yaml-directive.loader-error new file mode 100644 index 0000000..9b72390 --- /dev/null +++ b/_test/data/duplicate-yaml-directive.loader-error @@ -0,0 +1,3 @@ +%YAML 1.1 +%YAML 1.1 +--- foo diff --git a/_test/data/emit-block-scalar-in-simple-key-context-bug.canonical b/_test/data/emit-block-scalar-in-simple-key-context-bug.canonical new file mode 100644 index 0000000..473bed5 --- /dev/null +++ b/_test/data/emit-block-scalar-in-simple-key-context-bug.canonical @@ -0,0 +1,6 @@ +%YAML 1.1 +--- !!map +{ + ? !!str "foo" + : !!str "bar" +} diff --git a/_test/data/emit-block-scalar-in-simple-key-context-bug.data b/_test/data/emit-block-scalar-in-simple-key-context-bug.data new file mode 100644 index 0000000..b6b42ba --- /dev/null +++ b/_test/data/emit-block-scalar-in-simple-key-context-bug.data @@ -0,0 +1,4 @@ +? |- + foo +: |- + bar diff --git a/_test/data/emitting-unacceptable-unicode-character-bug-py3.code b/_test/data/emitting-unacceptable-unicode-character-bug-py3.code new file mode 100644 index 0000000..2a5df00 --- /dev/null +++ b/_test/data/emitting-unacceptable-unicode-character-bug-py3.code @@ -0,0 +1 @@ +"\udd00" diff --git a/_test/data/emitting-unacceptable-unicode-character-bug-py3.data b/_test/data/emitting-unacceptable-unicode-character-bug-py3.data new file mode 100644 index 0000000..2a5df00 --- /dev/null +++ b/_test/data/emitting-unacceptable-unicode-character-bug-py3.data @@ -0,0 +1 @@ +"\udd00" diff --git a/_test/data/emitting-unacceptable-unicode-character-bug-py3.skip-ext b/_test/data/emitting-unacceptable-unicode-character-bug-py3.skip-ext new file mode 100644 index 0000000..e69de29 diff --git a/_test/data/empty-anchor.emitter-error b/_test/data/empty-anchor.emitter-error new file mode 100644 index 0000000..ce663b6 --- /dev/null +++ b/_test/data/empty-anchor.emitter-error @@ -0,0 +1,5 @@ +- !StreamStart +- !DocumentStart +- !Scalar { anchor: '', value: 
'foo' } +- !DocumentEnd +- !StreamEnd diff --git a/_test/data/empty-document-bug.canonical b/_test/data/empty-document-bug.canonical new file mode 100644 index 0000000..28a6cf1 --- /dev/null +++ b/_test/data/empty-document-bug.canonical @@ -0,0 +1 @@ +# This YAML stream contains no YAML documents. diff --git a/_test/data/empty-document-bug.data b/_test/data/empty-document-bug.data new file mode 100644 index 0000000..e69de29 diff --git a/_test/data/empty-document-bug.empty b/_test/data/empty-document-bug.empty new file mode 100644 index 0000000..e69de29 diff --git a/_test/data/empty-documents.single-loader-error b/_test/data/empty-documents.single-loader-error new file mode 100644 index 0000000..f8dba8d --- /dev/null +++ b/_test/data/empty-documents.single-loader-error @@ -0,0 +1,2 @@ +--- # first document +--- # second document diff --git a/_test/data/empty-python-module.loader-error b/_test/data/empty-python-module.loader-error new file mode 100644 index 0000000..83d3232 --- /dev/null +++ b/_test/data/empty-python-module.loader-error @@ -0,0 +1 @@ +--- !!python:module: diff --git a/_test/data/empty-python-name.loader-error b/_test/data/empty-python-name.loader-error new file mode 100644 index 0000000..6162957 --- /dev/null +++ b/_test/data/empty-python-name.loader-error @@ -0,0 +1 @@ +--- !!python/name: empty diff --git a/_test/data/empty-tag-handle.emitter-error b/_test/data/empty-tag-handle.emitter-error new file mode 100644 index 0000000..235c899 --- /dev/null +++ b/_test/data/empty-tag-handle.emitter-error @@ -0,0 +1,5 @@ +- !StreamStart +- !DocumentStart { tags: { '': 'bar' } } +- !Scalar { value: 'foo' } +- !DocumentEnd +- !StreamEnd diff --git a/_test/data/empty-tag-prefix.emitter-error b/_test/data/empty-tag-prefix.emitter-error new file mode 100644 index 0000000..c6c0e95 --- /dev/null +++ b/_test/data/empty-tag-prefix.emitter-error @@ -0,0 +1,5 @@ +- !StreamStart +- !DocumentStart { tags: { '!': '' } } +- !Scalar { value: 'foo' } +- !DocumentEnd +- 
!StreamEnd diff --git a/_test/data/empty-tag.emitter-error b/_test/data/empty-tag.emitter-error new file mode 100644 index 0000000..b7ca593 --- /dev/null +++ b/_test/data/empty-tag.emitter-error @@ -0,0 +1,5 @@ +- !StreamStart +- !DocumentStart +- !Scalar { tag: '', value: 'key', implicit: [false,false] } +- !DocumentEnd +- !StreamEnd diff --git a/_test/data/expected-document-end.emitter-error b/_test/data/expected-document-end.emitter-error new file mode 100644 index 0000000..0cbab89 --- /dev/null +++ b/_test/data/expected-document-end.emitter-error @@ -0,0 +1,6 @@ +- !StreamStart +- !DocumentStart +- !Scalar { value: 'data 1' } +- !Scalar { value: 'data 2' } +- !DocumentEnd +- !StreamEnd diff --git a/_test/data/expected-document-start.emitter-error b/_test/data/expected-document-start.emitter-error new file mode 100644 index 0000000..8ce575e --- /dev/null +++ b/_test/data/expected-document-start.emitter-error @@ -0,0 +1,4 @@ +- !StreamStart +- !MappingStart +- !MappingEnd +- !StreamEnd diff --git a/_test/data/expected-mapping.loader-error b/_test/data/expected-mapping.loader-error new file mode 100644 index 0000000..82aed98 --- /dev/null +++ b/_test/data/expected-mapping.loader-error @@ -0,0 +1 @@ +--- !!map [not, a, map] diff --git a/_test/data/expected-node-1.emitter-error b/_test/data/expected-node-1.emitter-error new file mode 100644 index 0000000..36ceca3 --- /dev/null +++ b/_test/data/expected-node-1.emitter-error @@ -0,0 +1,4 @@ +- !StreamStart +- !DocumentStart +- !DocumentEnd +- !StreamEnd diff --git a/_test/data/expected-node-2.emitter-error b/_test/data/expected-node-2.emitter-error new file mode 100644 index 0000000..891ee37 --- /dev/null +++ b/_test/data/expected-node-2.emitter-error @@ -0,0 +1,7 @@ +- !StreamStart +- !DocumentStart +- !MappingStart +- !Scalar { value: 'key' } +- !MappingEnd +- !DocumentEnd +- !StreamEnd diff --git a/_test/data/expected-nothing.emitter-error b/_test/data/expected-nothing.emitter-error new file mode 100644 index 
0000000..62c54d3 --- /dev/null +++ b/_test/data/expected-nothing.emitter-error @@ -0,0 +1,4 @@ +- !StreamStart +- !StreamEnd +- !StreamStart +- !StreamEnd diff --git a/_test/data/expected-scalar.loader-error b/_test/data/expected-scalar.loader-error new file mode 100644 index 0000000..7b3171e --- /dev/null +++ b/_test/data/expected-scalar.loader-error @@ -0,0 +1 @@ +--- !!str [not a scalar] diff --git a/_test/data/expected-sequence.loader-error b/_test/data/expected-sequence.loader-error new file mode 100644 index 0000000..08074ea --- /dev/null +++ b/_test/data/expected-sequence.loader-error @@ -0,0 +1 @@ +--- !!seq {foo, bar, baz} diff --git a/_test/data/expected-stream-start.emitter-error b/_test/data/expected-stream-start.emitter-error new file mode 100644 index 0000000..480dc2e --- /dev/null +++ b/_test/data/expected-stream-start.emitter-error @@ -0,0 +1,2 @@ +- !DocumentStart +- !DocumentEnd diff --git a/_test/data/explicit-document.single-loader-error b/_test/data/explicit-document.single-loader-error new file mode 100644 index 0000000..46c6f8b --- /dev/null +++ b/_test/data/explicit-document.single-loader-error @@ -0,0 +1,4 @@ +--- +foo: bar +--- +foo: bar diff --git a/_test/data/fetch-complex-value-bug.loader-error b/_test/data/fetch-complex-value-bug.loader-error new file mode 100644 index 0000000..25fac24 --- /dev/null +++ b/_test/data/fetch-complex-value-bug.loader-error @@ -0,0 +1,2 @@ +? 
"foo" + : "bar" diff --git a/_test/data/float-representer-2.3-bug.code b/_test/data/float-representer-2.3-bug.code new file mode 100644 index 0000000..d8db834 --- /dev/null +++ b/_test/data/float-representer-2.3-bug.code @@ -0,0 +1,7 @@ +{ +# 0.0: 0, + 1.0: 1, + 1e300000: +10, + -1e300000: -10, + 1e300000/1e300000: 100, +} diff --git a/_test/data/float-representer-2.3-bug.data b/_test/data/float-representer-2.3-bug.data new file mode 100644 index 0000000..efd1716 --- /dev/null +++ b/_test/data/float-representer-2.3-bug.data @@ -0,0 +1,5 @@ +#0.0: # hash(0) == hash(nan) and 0 == nan in Python 2.3 +1.0: 1 ++.inf: 10 +-.inf: -10 +.nan: 100 diff --git a/_test/data/float.data b/_test/data/float.data new file mode 100644 index 0000000..524d5db --- /dev/null +++ b/_test/data/float.data @@ -0,0 +1,6 @@ +- 6.8523015e+5 +- 685.230_15e+03 +- 685_230.15 +- 190:20:30.15 +- -.inf +- .NaN diff --git a/_test/data/float.detect b/_test/data/float.detect new file mode 100644 index 0000000..1e12343 --- /dev/null +++ b/_test/data/float.detect @@ -0,0 +1 @@ +tag:yaml.org,2002:float diff --git a/_test/data/forbidden-entry.loader-error b/_test/data/forbidden-entry.loader-error new file mode 100644 index 0000000..f2e3079 --- /dev/null +++ b/_test/data/forbidden-entry.loader-error @@ -0,0 +1,2 @@ +test: - foo + - bar diff --git a/_test/data/forbidden-key.loader-error b/_test/data/forbidden-key.loader-error new file mode 100644 index 0000000..da9b471 --- /dev/null +++ b/_test/data/forbidden-key.loader-error @@ -0,0 +1,2 @@ +test: ? 
foo + : bar diff --git a/_test/data/forbidden-value.loader-error b/_test/data/forbidden-value.loader-error new file mode 100644 index 0000000..efd7ce5 --- /dev/null +++ b/_test/data/forbidden-value.loader-error @@ -0,0 +1 @@ +test: key: value diff --git a/_test/data/implicit-document.single-loader-error b/_test/data/implicit-document.single-loader-error new file mode 100644 index 0000000..f8c9a5c --- /dev/null +++ b/_test/data/implicit-document.single-loader-error @@ -0,0 +1,3 @@ +foo: bar +--- +foo: bar diff --git a/_test/data/int.data b/_test/data/int.data new file mode 100644 index 0000000..f71d814 --- /dev/null +++ b/_test/data/int.data @@ -0,0 +1,7 @@ +- 685230 +- +685_230 +- 02472256 +- 0o2472256 +- 0x_0A_74_AE +- 0b1010_0111_0100_1010_1110 +- 190:20:30 diff --git a/_test/data/int.detect b/_test/data/int.detect new file mode 100644 index 0000000..575c9eb --- /dev/null +++ b/_test/data/int.detect @@ -0,0 +1 @@ +tag:yaml.org,2002:int diff --git a/_test/data/invalid-anchor-1.loader-error b/_test/data/invalid-anchor-1.loader-error new file mode 100644 index 0000000..fcf7d0f --- /dev/null +++ b/_test/data/invalid-anchor-1.loader-error @@ -0,0 +1 @@ +--- &? foo # we allow only ascii and numeric characters in anchor names. 
diff --git a/_test/data/invalid-anchor-2.loader-error b/_test/data/invalid-anchor-2.loader-error new file mode 100644 index 0000000..bfc4ff0 --- /dev/null +++ b/_test/data/invalid-anchor-2.loader-error @@ -0,0 +1,8 @@ +--- +- [ + &correct foo, + *correct, + *correct] # still correct +- *correct: still correct +- &correct-or-not[foo, bar] + diff --git a/_test/data/invalid-anchor.emitter-error b/_test/data/invalid-anchor.emitter-error new file mode 100644 index 0000000..3d2a814 --- /dev/null +++ b/_test/data/invalid-anchor.emitter-error @@ -0,0 +1,5 @@ +- !StreamStart +- !DocumentStart +- !Scalar { anchor: '5*5=25', value: 'foo' } +- !DocumentEnd +- !StreamEnd diff --git a/_test/data/invalid-base64-data-2.loader-error b/_test/data/invalid-base64-data-2.loader-error new file mode 100644 index 0000000..2553a4f --- /dev/null +++ b/_test/data/invalid-base64-data-2.loader-error @@ -0,0 +1,2 @@ +--- !!binary + двоичные данные в base64 diff --git a/_test/data/invalid-base64-data.loader-error b/_test/data/invalid-base64-data.loader-error new file mode 100644 index 0000000..798abba --- /dev/null +++ b/_test/data/invalid-base64-data.loader-error @@ -0,0 +1,2 @@ +--- !!binary + binary data encoded in base64 should be here. diff --git a/_test/data/invalid-block-scalar-indicator.loader-error b/_test/data/invalid-block-scalar-indicator.loader-error new file mode 100644 index 0000000..16a6db1 --- /dev/null +++ b/_test/data/invalid-block-scalar-indicator.loader-error @@ -0,0 +1,2 @@ +--- > what is this? 
# a comment +data diff --git a/_test/data/invalid-character.loader-error b/_test/data/invalid-character.loader-error new file mode 100644 index 0000000..03687b0 Binary files /dev/null and b/_test/data/invalid-character.loader-error differ diff --git a/_test/data/invalid-character.stream-error b/_test/data/invalid-character.stream-error new file mode 100644 index 0000000..171face Binary files /dev/null and b/_test/data/invalid-character.stream-error differ diff --git a/_test/data/invalid-directive-line.loader-error b/_test/data/invalid-directive-line.loader-error new file mode 100644 index 0000000..0892eb6 --- /dev/null +++ b/_test/data/invalid-directive-line.loader-error @@ -0,0 +1,2 @@ +%YAML 1.1 ? # extra symbol +--- diff --git a/_test/data/invalid-directive-name-1.loader-error b/_test/data/invalid-directive-name-1.loader-error new file mode 100644 index 0000000..153fd88 --- /dev/null +++ b/_test/data/invalid-directive-name-1.loader-error @@ -0,0 +1,2 @@ +% # no name at all +--- diff --git a/_test/data/invalid-directive-name-2.loader-error b/_test/data/invalid-directive-name-2.loader-error new file mode 100644 index 0000000..3732a06 --- /dev/null +++ b/_test/data/invalid-directive-name-2.loader-error @@ -0,0 +1,2 @@ +%invalid-characters:in-directive name +--- diff --git a/_test/data/invalid-escape-character.loader-error b/_test/data/invalid-escape-character.loader-error new file mode 100644 index 0000000..a95ab76 --- /dev/null +++ b/_test/data/invalid-escape-character.loader-error @@ -0,0 +1 @@ +"some escape characters are \ncorrect, but this one \?\nis not\n" diff --git a/_test/data/invalid-escape-numbers.loader-error b/_test/data/invalid-escape-numbers.loader-error new file mode 100644 index 0000000..614ec9f --- /dev/null +++ b/_test/data/invalid-escape-numbers.loader-error @@ -0,0 +1 @@ +"hm.... \u123?" 
diff --git a/_test/data/invalid-indentation-indicator-1.loader-error b/_test/data/invalid-indentation-indicator-1.loader-error new file mode 100644 index 0000000..a3cd12f --- /dev/null +++ b/_test/data/invalid-indentation-indicator-1.loader-error @@ -0,0 +1,2 @@ +--- >0 # not valid +data diff --git a/_test/data/invalid-indentation-indicator-2.loader-error b/_test/data/invalid-indentation-indicator-2.loader-error new file mode 100644 index 0000000..eefb6ec --- /dev/null +++ b/_test/data/invalid-indentation-indicator-2.loader-error @@ -0,0 +1,2 @@ +--- >-0 +data diff --git a/_test/data/invalid-item-without-trailing-break.loader-error b/_test/data/invalid-item-without-trailing-break.loader-error new file mode 100644 index 0000000..fdcf6c6 --- /dev/null +++ b/_test/data/invalid-item-without-trailing-break.loader-error @@ -0,0 +1,2 @@ +- +-0 \ No newline at end of file diff --git a/_test/data/invalid-merge-1.loader-error b/_test/data/invalid-merge-1.loader-error new file mode 100644 index 0000000..fc3c284 --- /dev/null +++ b/_test/data/invalid-merge-1.loader-error @@ -0,0 +1,2 @@ +foo: bar +<<: baz diff --git a/_test/data/invalid-merge-2.loader-error b/_test/data/invalid-merge-2.loader-error new file mode 100644 index 0000000..8e88615 --- /dev/null +++ b/_test/data/invalid-merge-2.loader-error @@ -0,0 +1,2 @@ +foo: bar +<<: [x: 1, y: 2, z, t: 4] diff --git a/_test/data/invalid-omap-1.loader-error b/_test/data/invalid-omap-1.loader-error new file mode 100644 index 0000000..2863392 --- /dev/null +++ b/_test/data/invalid-omap-1.loader-error @@ -0,0 +1,3 @@ +--- !!omap +foo: bar +baz: bat diff --git a/_test/data/invalid-omap-2.loader-error b/_test/data/invalid-omap-2.loader-error new file mode 100644 index 0000000..c377dfb --- /dev/null +++ b/_test/data/invalid-omap-2.loader-error @@ -0,0 +1,3 @@ +--- !!omap +- foo: bar +- baz diff --git a/_test/data/invalid-omap-3.loader-error b/_test/data/invalid-omap-3.loader-error new file mode 100644 index 0000000..2a4f50d --- 
/dev/null +++ b/_test/data/invalid-omap-3.loader-error @@ -0,0 +1,4 @@ +--- !!omap +- foo: bar +- baz: bar + bar: bar diff --git a/_test/data/invalid-pairs-1.loader-error b/_test/data/invalid-pairs-1.loader-error new file mode 100644 index 0000000..42d19ae --- /dev/null +++ b/_test/data/invalid-pairs-1.loader-error @@ -0,0 +1,3 @@ +--- !!pairs +foo: bar +baz: bat diff --git a/_test/data/invalid-pairs-2.loader-error b/_test/data/invalid-pairs-2.loader-error new file mode 100644 index 0000000..31389ea --- /dev/null +++ b/_test/data/invalid-pairs-2.loader-error @@ -0,0 +1,3 @@ +--- !!pairs +- foo: bar +- baz diff --git a/_test/data/invalid-pairs-3.loader-error b/_test/data/invalid-pairs-3.loader-error new file mode 100644 index 0000000..f8d7704 --- /dev/null +++ b/_test/data/invalid-pairs-3.loader-error @@ -0,0 +1,4 @@ +--- !!pairs +- foo: bar +- baz: bar + bar: bar diff --git a/_test/data/invalid-python-bytes-2-py3.loader-error b/_test/data/invalid-python-bytes-2-py3.loader-error new file mode 100644 index 0000000..f43af59 --- /dev/null +++ b/_test/data/invalid-python-bytes-2-py3.loader-error @@ -0,0 +1,2 @@ +--- !!python/bytes + двоичные данные в base64 diff --git a/_test/data/invalid-python-bytes-py3.loader-error b/_test/data/invalid-python-bytes-py3.loader-error new file mode 100644 index 0000000..a19dfd0 --- /dev/null +++ b/_test/data/invalid-python-bytes-py3.loader-error @@ -0,0 +1,2 @@ +--- !!python/bytes + binary data encoded in base64 should be here. 
diff --git a/_test/data/invalid-python-module-kind.loader-error b/_test/data/invalid-python-module-kind.loader-error new file mode 100644 index 0000000..4f71cb5 --- /dev/null +++ b/_test/data/invalid-python-module-kind.loader-error @@ -0,0 +1 @@ +--- !!python/module:sys { must, be, scalar } diff --git a/_test/data/invalid-python-module-value.loader-error b/_test/data/invalid-python-module-value.loader-error new file mode 100644 index 0000000..f6797fc --- /dev/null +++ b/_test/data/invalid-python-module-value.loader-error @@ -0,0 +1 @@ +--- !!python/module:sys "non-empty value" diff --git a/_test/data/invalid-python-module.loader-error b/_test/data/invalid-python-module.loader-error new file mode 100644 index 0000000..4e24072 --- /dev/null +++ b/_test/data/invalid-python-module.loader-error @@ -0,0 +1 @@ +--- !!python/module:no.such.module diff --git a/_test/data/invalid-python-name-kind.loader-error b/_test/data/invalid-python-name-kind.loader-error new file mode 100644 index 0000000..6ff8eb6 --- /dev/null +++ b/_test/data/invalid-python-name-kind.loader-error @@ -0,0 +1 @@ +--- !!python/name:sys.modules {} diff --git a/_test/data/invalid-python-name-module-2.loader-error b/_test/data/invalid-python-name-module-2.loader-error new file mode 100644 index 0000000..debc313 --- /dev/null +++ b/_test/data/invalid-python-name-module-2.loader-error @@ -0,0 +1 @@ +--- !!python/name:xml.parsers diff --git a/_test/data/invalid-python-name-module.loader-error b/_test/data/invalid-python-name-module.loader-error new file mode 100644 index 0000000..1966f6a --- /dev/null +++ b/_test/data/invalid-python-name-module.loader-error @@ -0,0 +1 @@ +--- !!python/name:sys.modules.keys diff --git a/_test/data/invalid-python-name-object.loader-error b/_test/data/invalid-python-name-object.loader-error new file mode 100644 index 0000000..50f386f --- /dev/null +++ b/_test/data/invalid-python-name-object.loader-error @@ -0,0 +1 @@ +--- !!python/name:os.path.rm_rf diff --git 
a/_test/data/invalid-python-name-value.loader-error b/_test/data/invalid-python-name-value.loader-error new file mode 100644 index 0000000..7be1401 --- /dev/null +++ b/_test/data/invalid-python-name-value.loader-error @@ -0,0 +1 @@ +--- !!python/name:sys.modules 5 diff --git a/_test/data/invalid-simple-key.loader-error b/_test/data/invalid-simple-key.loader-error new file mode 100644 index 0000000..a58deec --- /dev/null +++ b/_test/data/invalid-simple-key.loader-error @@ -0,0 +1,3 @@ +key: value +invalid simple key +next key: next value diff --git a/_test/data/invalid-single-quote-bug.code b/_test/data/invalid-single-quote-bug.code new file mode 100644 index 0000000..5558945 --- /dev/null +++ b/_test/data/invalid-single-quote-bug.code @@ -0,0 +1 @@ +["foo 'bar'", "foo\n'bar'"] diff --git a/_test/data/invalid-single-quote-bug.data b/_test/data/invalid-single-quote-bug.data new file mode 100644 index 0000000..76ef7ae --- /dev/null +++ b/_test/data/invalid-single-quote-bug.data @@ -0,0 +1,2 @@ +- "foo 'bar'" +- "foo\n'bar'" diff --git a/_test/data/invalid-starting-character.loader-error b/_test/data/invalid-starting-character.loader-error new file mode 100644 index 0000000..bb81c60 --- /dev/null +++ b/_test/data/invalid-starting-character.loader-error @@ -0,0 +1 @@ +@@@@@@@@@@@@@@@@@@@ diff --git a/_test/data/invalid-tag-1.loader-error b/_test/data/invalid-tag-1.loader-error new file mode 100644 index 0000000..a68cd38 --- /dev/null +++ b/_test/data/invalid-tag-1.loader-error @@ -0,0 +1 @@ +- ! 
baz diff --git a/_test/data/invalid-tag-2.loader-error b/_test/data/invalid-tag-2.loader-error new file mode 100644 index 0000000..3a36700 --- /dev/null +++ b/_test/data/invalid-tag-2.loader-error @@ -0,0 +1 @@ +- !prefix!foo#bar baz diff --git a/_test/data/invalid-tag-directive-handle.loader-error b/_test/data/invalid-tag-directive-handle.loader-error new file mode 100644 index 0000000..42b5d7e --- /dev/null +++ b/_test/data/invalid-tag-directive-handle.loader-error @@ -0,0 +1,2 @@ +%TAG !!! !!! +--- diff --git a/_test/data/invalid-tag-directive-prefix.loader-error b/_test/data/invalid-tag-directive-prefix.loader-error new file mode 100644 index 0000000..0cb482c --- /dev/null +++ b/_test/data/invalid-tag-directive-prefix.loader-error @@ -0,0 +1,2 @@ +%TAG ! tag:zz.com/foo#bar # '#' is not allowed in URLs +--- diff --git a/_test/data/invalid-tag-handle-1.emitter-error b/_test/data/invalid-tag-handle-1.emitter-error new file mode 100644 index 0000000..d5df9a2 --- /dev/null +++ b/_test/data/invalid-tag-handle-1.emitter-error @@ -0,0 +1,5 @@ +- !StreamStart +- !DocumentStart { tags: { '!foo': 'bar' } } +- !Scalar { value: 'foo' } +- !DocumentEnd +- !StreamEnd diff --git a/_test/data/invalid-tag-handle-1.loader-error b/_test/data/invalid-tag-handle-1.loader-error new file mode 100644 index 0000000..ef0d143 --- /dev/null +++ b/_test/data/invalid-tag-handle-1.loader-error @@ -0,0 +1,2 @@ +%TAG foo bar +--- diff --git a/_test/data/invalid-tag-handle-2.emitter-error b/_test/data/invalid-tag-handle-2.emitter-error new file mode 100644 index 0000000..d1831d5 --- /dev/null +++ b/_test/data/invalid-tag-handle-2.emitter-error @@ -0,0 +1,5 @@ +- !StreamStart +- !DocumentStart { tags: { '!!!': 'bar' } } +- !Scalar { value: 'foo' } +- !DocumentEnd +- !StreamEnd diff --git a/_test/data/invalid-tag-handle-2.loader-error b/_test/data/invalid-tag-handle-2.loader-error new file mode 100644 index 0000000..06c7f0e --- /dev/null +++ b/_test/data/invalid-tag-handle-2.loader-error @@ -0,0 
+1,2 @@ +%TAG !foo bar +--- diff --git a/_test/data/invalid-uri-escapes-1.loader-error b/_test/data/invalid-uri-escapes-1.loader-error new file mode 100644 index 0000000..a6ecb36 --- /dev/null +++ b/_test/data/invalid-uri-escapes-1.loader-error @@ -0,0 +1 @@ +--- ! foo diff --git a/_test/data/invalid-uri-escapes-2.loader-error b/_test/data/invalid-uri-escapes-2.loader-error new file mode 100644 index 0000000..b89e8f6 --- /dev/null +++ b/_test/data/invalid-uri-escapes-2.loader-error @@ -0,0 +1 @@ +--- !<%FF> foo diff --git a/_test/data/invalid-uri-escapes-3.loader-error b/_test/data/invalid-uri-escapes-3.loader-error new file mode 100644 index 0000000..f2e4cb8 --- /dev/null +++ b/_test/data/invalid-uri-escapes-3.loader-error @@ -0,0 +1 @@ +--- ! baz diff --git a/_test/data/invalid-uri.loader-error b/_test/data/invalid-uri.loader-error new file mode 100644 index 0000000..06307e0 --- /dev/null +++ b/_test/data/invalid-uri.loader-error @@ -0,0 +1 @@ +--- !foo! bar diff --git a/_test/data/invalid-utf8-byte.loader-error b/_test/data/invalid-utf8-byte.loader-error new file mode 100644 index 0000000..0a58c70 --- /dev/null +++ b/_test/data/invalid-utf8-byte.loader-error @@ -0,0 +1,66 @@ +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### 
+############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### 
+############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +Invalid byte ('\xFF'): ÿ <-- +############################################################### diff --git a/_test/data/invalid-utf8-byte.stream-error b/_test/data/invalid-utf8-byte.stream-error new file mode 100644 index 0000000..0a58c70 --- /dev/null +++ b/_test/data/invalid-utf8-byte.stream-error @@ -0,0 +1,66 @@ +############################################################### +############################################################### +############################################################### 
+############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### 
+############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### +############################################################### 
+############################################################### +Invalid byte ('\xFF'): ÿ <-- +############################################################### diff --git a/_test/data/invalid-yaml-directive-version-1.loader-error b/_test/data/invalid-yaml-directive-version-1.loader-error new file mode 100644 index 0000000..e9b4e3a --- /dev/null +++ b/_test/data/invalid-yaml-directive-version-1.loader-error @@ -0,0 +1,3 @@ +# No version at all. +%YAML +--- diff --git a/_test/data/invalid-yaml-directive-version-2.loader-error b/_test/data/invalid-yaml-directive-version-2.loader-error new file mode 100644 index 0000000..6aa7740 --- /dev/null +++ b/_test/data/invalid-yaml-directive-version-2.loader-error @@ -0,0 +1,2 @@ +%YAML 1e-5 +--- diff --git a/_test/data/invalid-yaml-directive-version-3.loader-error b/_test/data/invalid-yaml-directive-version-3.loader-error new file mode 100644 index 0000000..345e784 --- /dev/null +++ b/_test/data/invalid-yaml-directive-version-3.loader-error @@ -0,0 +1,2 @@ +%YAML 1. 
+--- diff --git a/_test/data/invalid-yaml-directive-version-4.loader-error b/_test/data/invalid-yaml-directive-version-4.loader-error new file mode 100644 index 0000000..b35ca82 --- /dev/null +++ b/_test/data/invalid-yaml-directive-version-4.loader-error @@ -0,0 +1,2 @@ +%YAML 1.132.435 +--- diff --git a/_test/data/invalid-yaml-directive-version-5.loader-error b/_test/data/invalid-yaml-directive-version-5.loader-error new file mode 100644 index 0000000..7c2b49f --- /dev/null +++ b/_test/data/invalid-yaml-directive-version-5.loader-error @@ -0,0 +1,2 @@ +%YAML A.0 +--- diff --git a/_test/data/invalid-yaml-directive-version-6.loader-error b/_test/data/invalid-yaml-directive-version-6.loader-error new file mode 100644 index 0000000..bae714f --- /dev/null +++ b/_test/data/invalid-yaml-directive-version-6.loader-error @@ -0,0 +1,2 @@ +%YAML 123.C +--- diff --git a/_test/data/invalid-yaml-version.loader-error b/_test/data/invalid-yaml-version.loader-error new file mode 100644 index 0000000..dd01948 --- /dev/null +++ b/_test/data/invalid-yaml-version.loader-error @@ -0,0 +1,2 @@ +%YAML 2.0 +--- foo diff --git a/_test/data/latin.unicode b/_test/data/latin.unicode new file mode 100644 index 0000000..4fb799c --- /dev/null +++ b/_test/data/latin.unicode @@ -0,0 +1,384 @@ +ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµºÀÃÂÃÄÅÆÇÈÉÊ +ËÌÃÃŽÃÃÑÒÓÔÕÖØÙÚÛÜÃÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿĀÄĂ㥹ĆćĈĉĊċČÄÄŽ +ÄÄđĒēĔĕĖėĘęĚěĜÄĞğĠġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀÅłŃńŅņŇňʼnŊŋŌÅÅŽÅÅ +őŒœŔŕŖŗŘřŚśŜÅŞşŠšŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſƀÆƂƃƄƅƆƇƈƉƊƋƌÆÆŽÆÆÆ‘Æ’ +ƓƔƕƖƗƘƙƚƛƜÆƞƟƠơƢƣƤƥƦƧƨƩƪƫƬƭƮƯưƱƲƳƴƵƶƷƸƹƺƼƽƾƿDŽdžLJljNJnjÇÇŽÇÇǑǒǓǔǕǖǗǘǙǚǛǜ +ÇǞǟǠǡǢǣǤǥǦǧǨǩǪǫǬǭǮǯǰDZdzǴǵǶǷǸǹǺǻǼǽǾǿȀÈȂȃȄȅȆȇȈȉȊȋȌÈÈŽÈÈȑȒȓȔȕȖȗȘșȚțȜÈȞȟ +ȠȡȢȣȤȥȦȧȨȩȪȫȬȭȮȯȰȱȲȳȴȵȶȷȸȹȺȻȼȽȾȿɀÉÉɑɒɓɔɕɖɗɘəɚɛɜÉɞɟɠɡɢɣɤɥɦɧɨɩɪɫɬɭɮɯ +ɰɱɲɳɴɵɶɷɸɹɺɻɼɽɾɿʀÊʂʃʄʅʆʇʈʉʊʋʌÊÊŽÊÊʑʒʓʔʕʖʗʘʙʚʛʜÊʞʟʠʡʢʣʤʥʦʧʨʩʪʫʬʭʮʯΆΈ +ΉΊΌΎÎÎΑΒΓΔΕΖΗΘΙΚΛΜÎΞΟΠΡΣΤΥΦΧΨΩΪΫάέήίΰαβγδεζηθικλμνξοπÏÏ‚ÏƒÏ„Ï…Ï†Ï‡ÏˆÏ‰ÏŠÏ‹ÏŒÏ +ÏŽÏϑϒϓϔϕϖϗϘϙϚϛϜÏϞϟϠϡϢϣϤϥϦϧϨϩϪϫϬϭϮϯϰϱϲϳϴϵϷϸϹϺϻϼϽϾϿЀÐЂЃЄЅІЇЈЉЊЋЌÐÐŽÐÐБ 
+ВГДЕЖЗИЙКЛМÐОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрÑтуфхцчшщъыьÑÑŽÑÑÑ‘Ñ’Ñ“ +єѕіїјљњћќÑўџѠѡѢѣѤѥѦѧѨѩѪѫѬѭѮѯѰѱѲѳѴѵѶѷѸѹѺѻѼѽѾѿҀÒÒŠÒ‹ÒŒÒÒŽÒÒÒ‘Ò’Ò“Ò”Ò•Ò–Ò—Ò˜Ò™ÒšÒ›ÒœÒ +ÒžÒŸÒ Ò¡Ò¢Ò£Ò¤Ò¥Ò¦Ò§Ò¨Ò©ÒªÒ«Ò¬Ò­Ò®Ò¯Ò°Ò±Ò²Ò³Ò´ÒµÒ¶Ò·Ò¸Ò¹ÒºÒ»Ò¼Ò½Ò¾Ò¿Ó€ÓÓ‚ÓƒÓ„Ó…Ó†Ó‡ÓˆÓ‰ÓŠÓ‹ÓŒÓÓŽÓÓ‘Ó’Ó“Ó”Ó•Ó–Ó—Ó˜Ó™ÓšÓ›ÓœÓÓžÓŸÓ  +Ó¡Ó¢Ó£Ó¤Ó¥Ó¦Ó§Ó¨Ó©ÓªÓ«Ó¬Ó­Ó®Ó¯Ó°Ó±Ó²Ó³Ó´ÓµÓ¶Ó·Ó¸Ó¹Ô€ÔÔ‚ÔƒÔ„Ô…Ô†Ô‡ÔˆÔ‰ÔŠÔ‹ÔŒÔÔŽÔÔ±Ô²Ô³Ô´ÔµÔ¶Ô·Ô¸Ô¹ÔºÔ»Ô¼Ô½Ô¾Ô¿Õ€ÕÕ‚ÕƒÕ„Õ…Õ†Õ‡ÕˆÕ‰ +ÕŠÕ‹ÕŒÕÕŽÕÕÕ‘Õ’Õ“Õ”Õ•Õ–Õ¡Õ¢Õ£Õ¤Õ¥Õ¦Õ§Õ¨Õ©ÕªÕ«Õ¬Õ­Õ®Õ¯Õ°Õ±Õ²Õ³Õ´ÕµÕ¶Õ·Õ¸Õ¹ÕºÕ»Õ¼Õ½Õ¾Õ¿Ö€ÖւփքօֆևႠႡႢႣႤႥႦႧႨႩႪႫႬႭ +ႮႯႰႱႲႳႴႵႶႷႸႹႺႻႼႽႾႿჀáƒáƒ‚ჃჄჅᴀá´á´‚ᴃᴄᴅᴆᴇᴈᴉᴊᴋᴌá´á´Žá´á´á´‘ᴒᴓᴔᴕᴖᴗᴘᴙᴚᴛᴜá´á´žá´Ÿá´ á´¡á´¢á´£á´¤á´¥á´¦á´§á´¨á´© +ᴪᴫᵢᵣᵤᵥᵦᵧᵨᵩᵪᵫᵬᵭᵮᵯᵰᵱᵲᵳᵴᵵᵶᵷᵹᵺᵻᵼᵽᵾᵿᶀá¶á¶‚ᶃᶄᶅᶆᶇᶈᶉᶊᶋᶌá¶á¶Žá¶á¶á¶‘ᶒᶓᶔᶕᶖᶗᶘᶙᶚḀá¸á¸‚ḃḄḅḆḇ +ḈḉḊḋḌá¸á¸Žá¸á¸á¸‘ḒḓḔḕḖḗḘḙḚḛḜá¸á¸žá¸Ÿá¸ á¸¡á¸¢á¸£á¸¤á¸¥á¸¦á¸§á¸¨á¸©á¸ªá¸«á¸¬á¸­á¸®á¸¯á¸°á¸±á¸²á¸³á¸´á¸µá¸¶á¸·á¸¸á¸¹á¸ºá¸»á¸¼á¸½á¸¾á¸¿á¹€á¹á¹‚ṃṄṅṆṇṈṉ +ṊṋṌá¹á¹Žá¹á¹á¹‘ṒṓṔṕṖṗṘṙṚṛṜá¹á¹žá¹Ÿá¹ á¹¡á¹¢á¹£á¹¤á¹¥á¹¦á¹§á¹¨á¹©á¹ªá¹«á¹¬á¹­á¹®á¹¯á¹°á¹±á¹²á¹³á¹´á¹µá¹¶á¹·á¹¸á¹¹á¹ºá¹»á¹¼á¹½á¹¾á¹¿áº€áºáº‚ẃẄẅẆẇẈẉẊẋ +ẌáºáºŽáºáºáº‘ẒẓẔẕẖẗẘẙẚẛẠạẢảẤấẦầẨẩẪẫẬậẮắẰằẲẳẴẵẶặẸẹẺẻẼẽẾếỀá»á»‚ểỄễỆệỈỉỊịỌá»á»Žá»á»á»‘ +ỒồỔổỖỗỘộỚớỜá»á»žá»Ÿá» á»¡á»¢á»£á»¤á»¥á»¦á»§á»¨á»©á»ªá»«á»¬á»­á»®á»¯á»°á»±á»²á»³á»´á»µá»¶á»·á»¸á»¹á¼€á¼á¼‚ἃἄἅἆἇἈἉἊἋἌá¼á¼Žá¼á¼á¼‘ἒἓἔἕἘἙἚἛ +Ἔá¼á¼ á¼¡á¼¢á¼£á¼¤á¼¥á¼¦á¼§á¼¨á¼©á¼ªá¼«á¼¬á¼­á¼®á¼¯á¼°á¼±á¼²á¼³á¼´á¼µá¼¶á¼·á¼¸á¼¹á¼ºá¼»á¼¼á¼½á¼¾á¼¿á½€á½á½‚ὃὄὅὈὉὊὋὌá½á½á½‘ὒὓὔὕὖὗὙὛá½á½Ÿá½ á½¡á½¢á½£á½¤á½¥á½¦á½§ +ὨὩὪὫὬὭὮὯὰάὲέὴήὶίὸόὺύὼώᾀá¾á¾‚ᾃᾄᾅᾆᾇá¾á¾‘ᾒᾓᾔᾕᾖᾗᾠᾡᾢᾣᾤᾥᾦᾧᾰᾱᾲᾳᾴᾶᾷᾸᾹᾺΆιῂῃῄῆῇῈΈῊ +á¿‹á¿á¿‘ῒΐῖῗῘῙῚΊῠῡῢΰῤῥῦῧῨῩῪΎῬῲῳῴῶῷῸΌῺΏâ±â¿â„‚ℇℊℋℌâ„â„Žâ„â„ℑℒℓℕℙℚℛℜâ„ℤΩℨKÅℬℭℯℰℱℳℴℹ +ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµºÀÃÂÃÄÅÆÇÈÉÊ +ËÌÃÃŽÃÃÑÒÓÔÕÖØÙÚÛÜÃÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿĀÄĂ㥹ĆćĈĉĊċČÄÄŽ +ÄÄđĒēĔĕĖėĘęĚěĜÄĞğĠġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀÅłŃńŅņŇňʼnŊŋŌÅÅŽÅÅ +őŒœŔŕŖŗŘřŚśŜÅŞşŠšŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſƀÆƂƃƄƅƆƇƈƉƊƋƌÆÆŽÆÆÆ‘Æ’ +ƓƔƕƖƗƘƙƚƛƜÆƞƟƠơƢƣƤƥƦƧƨƩƪƫƬƭƮƯưƱƲƳƴƵƶƷƸƹƺƼƽƾƿDŽdžLJljNJnjÇÇŽÇÇǑǒǓǔǕǖǗǘǙǚǛǜ +ÇǞǟǠǡǢǣǤǥǦǧǨǩǪǫǬǭǮǯǰDZdzǴǵǶǷǸǹǺǻǼǽǾǿȀÈȂȃȄȅȆȇȈȉȊȋȌÈÈŽÈÈȑȒȓȔȕȖȗȘșȚțȜÈȞȟ 
+ȠȡȢȣȤȥȦȧȨȩȪȫȬȭȮȯȰȱȲȳȴȵȶȷȸȹȺȻȼȽȾȿɀÉÉɑɒɓɔɕɖɗɘəɚɛɜÉɞɟɠɡɢɣɤɥɦɧɨɩɪɫɬɭɮɯ +ɰɱɲɳɴɵɶɷɸɹɺɻɼɽɾɿʀÊʂʃʄʅʆʇʈʉʊʋʌÊÊŽÊÊʑʒʓʔʕʖʗʘʙʚʛʜÊʞʟʠʡʢʣʤʥʦʧʨʩʪʫʬʭʮʯΆΈ +ΉΊΌΎÎÎΑΒΓΔΕΖΗΘΙΚΛΜÎΞΟΠΡΣΤΥΦΧΨΩΪΫάέήίΰαβγδεζηθικλμνξοπÏÏ‚ÏƒÏ„Ï…Ï†Ï‡ÏˆÏ‰ÏŠÏ‹ÏŒÏ +ÏŽÏϑϒϓϔϕϖϗϘϙϚϛϜÏϞϟϠϡϢϣϤϥϦϧϨϩϪϫϬϭϮϯϰϱϲϳϴϵϷϸϹϺϻϼϽϾϿЀÐЂЃЄЅІЇЈЉЊЋЌÐÐŽÐÐБ +ВГДЕЖЗИЙКЛМÐОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрÑтуфхцчшщъыьÑÑŽÑÑÑ‘Ñ’Ñ“ +єѕіїјљњћќÑўџѠѡѢѣѤѥѦѧѨѩѪѫѬѭѮѯѰѱѲѳѴѵѶѷѸѹѺѻѼѽѾѿҀÒÒŠÒ‹ÒŒÒÒŽÒÒÒ‘Ò’Ò“Ò”Ò•Ò–Ò—Ò˜Ò™ÒšÒ›ÒœÒ +ÒžÒŸÒ Ò¡Ò¢Ò£Ò¤Ò¥Ò¦Ò§Ò¨Ò©ÒªÒ«Ò¬Ò­Ò®Ò¯Ò°Ò±Ò²Ò³Ò´ÒµÒ¶Ò·Ò¸Ò¹ÒºÒ»Ò¼Ò½Ò¾Ò¿Ó€ÓÓ‚ÓƒÓ„Ó…Ó†Ó‡ÓˆÓ‰ÓŠÓ‹ÓŒÓÓŽÓÓ‘Ó’Ó“Ó”Ó•Ó–Ó—Ó˜Ó™ÓšÓ›ÓœÓÓžÓŸÓ  +Ó¡Ó¢Ó£Ó¤Ó¥Ó¦Ó§Ó¨Ó©ÓªÓ«Ó¬Ó­Ó®Ó¯Ó°Ó±Ó²Ó³Ó´ÓµÓ¶Ó·Ó¸Ó¹Ô€ÔÔ‚ÔƒÔ„Ô…Ô†Ô‡ÔˆÔ‰ÔŠÔ‹ÔŒÔÔŽÔÔ±Ô²Ô³Ô´ÔµÔ¶Ô·Ô¸Ô¹ÔºÔ»Ô¼Ô½Ô¾Ô¿Õ€ÕÕ‚ÕƒÕ„Õ…Õ†Õ‡ÕˆÕ‰ +ÕŠÕ‹ÕŒÕÕŽÕÕÕ‘Õ’Õ“Õ”Õ•Õ–Õ¡Õ¢Õ£Õ¤Õ¥Õ¦Õ§Õ¨Õ©ÕªÕ«Õ¬Õ­Õ®Õ¯Õ°Õ±Õ²Õ³Õ´ÕµÕ¶Õ·Õ¸Õ¹ÕºÕ»Õ¼Õ½Õ¾Õ¿Ö€ÖւփքօֆևႠႡႢႣႤႥႦႧႨႩႪႫႬႭ +ႮႯႰႱႲႳႴႵႶႷႸႹႺႻႼႽႾႿჀáƒáƒ‚ჃჄჅᴀá´á´‚ᴃᴄᴅᴆᴇᴈᴉᴊᴋᴌá´á´Žá´á´á´‘ᴒᴓᴔᴕᴖᴗᴘᴙᴚᴛᴜá´á´žá´Ÿá´ á´¡á´¢á´£á´¤á´¥á´¦á´§á´¨á´© +ᴪᴫᵢᵣᵤᵥᵦᵧᵨᵩᵪᵫᵬᵭᵮᵯᵰᵱᵲᵳᵴᵵᵶᵷᵹᵺᵻᵼᵽᵾᵿᶀá¶á¶‚ᶃᶄᶅᶆᶇᶈᶉᶊᶋᶌá¶á¶Žá¶á¶á¶‘ᶒᶓᶔᶕᶖᶗᶘᶙᶚḀá¸á¸‚ḃḄḅḆḇ +ḈḉḊḋḌá¸á¸Žá¸á¸á¸‘ḒḓḔḕḖḗḘḙḚḛḜá¸á¸žá¸Ÿá¸ á¸¡á¸¢á¸£á¸¤á¸¥á¸¦á¸§á¸¨á¸©á¸ªá¸«á¸¬á¸­á¸®á¸¯á¸°á¸±á¸²á¸³á¸´á¸µá¸¶á¸·á¸¸á¸¹á¸ºá¸»á¸¼á¸½á¸¾á¸¿á¹€á¹á¹‚ṃṄṅṆṇṈṉ +ṊṋṌá¹á¹Žá¹á¹á¹‘ṒṓṔṕṖṗṘṙṚṛṜá¹á¹žá¹Ÿá¹ á¹¡á¹¢á¹£á¹¤á¹¥á¹¦á¹§á¹¨á¹©á¹ªá¹«á¹¬á¹­á¹®á¹¯á¹°á¹±á¹²á¹³á¹´á¹µá¹¶á¹·á¹¸á¹¹á¹ºá¹»á¹¼á¹½á¹¾á¹¿áº€áºáº‚ẃẄẅẆẇẈẉẊẋ +ẌáºáºŽáºáºáº‘ẒẓẔẕẖẗẘẙẚẛẠạẢảẤấẦầẨẩẪẫẬậẮắẰằẲẳẴẵẶặẸẹẺẻẼẽẾếỀá»á»‚ểỄễỆệỈỉỊịỌá»á»Žá»á»á»‘ +ỒồỔổỖỗỘộỚớỜá»á»žá»Ÿá» á»¡á»¢á»£á»¤á»¥á»¦á»§á»¨á»©á»ªá»«á»¬á»­á»®á»¯á»°á»±á»²á»³á»´á»µá»¶á»·á»¸á»¹á¼€á¼á¼‚ἃἄἅἆἇἈἉἊἋἌá¼á¼Žá¼á¼á¼‘ἒἓἔἕἘἙἚἛ +Ἔá¼á¼ á¼¡á¼¢á¼£á¼¤á¼¥á¼¦á¼§á¼¨á¼©á¼ªá¼«á¼¬á¼­á¼®á¼¯á¼°á¼±á¼²á¼³á¼´á¼µá¼¶á¼·á¼¸á¼¹á¼ºá¼»á¼¼á¼½á¼¾á¼¿á½€á½á½‚ὃὄὅὈὉὊὋὌá½á½á½‘ὒὓὔὕὖὗὙὛá½á½Ÿá½ á½¡á½¢á½£á½¤á½¥á½¦á½§ +ὨὩὪὫὬὭὮὯὰάὲέὴήὶίὸόὺύὼώᾀá¾á¾‚ᾃᾄᾅᾆᾇá¾á¾‘ᾒᾓᾔᾕᾖᾗᾠᾡᾢᾣᾤᾥᾦᾧᾰᾱᾲᾳᾴᾶᾷᾸᾹᾺΆιῂῃῄῆῇῈΈῊ +á¿‹á¿á¿‘ῒΐῖῗῘῙῚΊῠῡῢΰῤῥῦῧῨῩῪΎῬῲῳῴῶῷῸΌῺΏâ±â¿â„‚ℇℊℋℌâ„â„Žâ„â„ℑℒℓℕℙℚℛℜâ„ℤΩℨKÅℬℭℯℰℱℳℴℹ +ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµºÀÃÂÃÄÅÆÇÈÉÊ +ËÌÃÃŽÃÃÑÒÓÔÕÖØÙÚÛÜÃÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿĀÄĂ㥹ĆćĈĉĊċČÄÄŽ 
+ÄÄđĒēĔĕĖėĘęĚěĜÄĞğĠġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀÅłŃńŅņŇňʼnŊŋŌÅÅŽÅÅ +őŒœŔŕŖŗŘřŚśŜÅŞşŠšŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſƀÆƂƃƄƅƆƇƈƉƊƋƌÆÆŽÆÆÆ‘Æ’ +ƓƔƕƖƗƘƙƚƛƜÆƞƟƠơƢƣƤƥƦƧƨƩƪƫƬƭƮƯưƱƲƳƴƵƶƷƸƹƺƼƽƾƿDŽdžLJljNJnjÇÇŽÇÇǑǒǓǔǕǖǗǘǙǚǛǜ +ÇǞǟǠǡǢǣǤǥǦǧǨǩǪǫǬǭǮǯǰDZdzǴǵǶǷǸǹǺǻǼǽǾǿȀÈȂȃȄȅȆȇȈȉȊȋȌÈÈŽÈÈȑȒȓȔȕȖȗȘșȚțȜÈȞȟ +ȠȡȢȣȤȥȦȧȨȩȪȫȬȭȮȯȰȱȲȳȴȵȶȷȸȹȺȻȼȽȾȿɀÉÉɑɒɓɔɕɖɗɘəɚɛɜÉɞɟɠɡɢɣɤɥɦɧɨɩɪɫɬɭɮɯ +ɰɱɲɳɴɵɶɷɸɹɺɻɼɽɾɿʀÊʂʃʄʅʆʇʈʉʊʋʌÊÊŽÊÊʑʒʓʔʕʖʗʘʙʚʛʜÊʞʟʠʡʢʣʤʥʦʧʨʩʪʫʬʭʮʯΆΈ +ΉΊΌΎÎÎΑΒΓΔΕΖΗΘΙΚΛΜÎΞΟΠΡΣΤΥΦΧΨΩΪΫάέήίΰαβγδεζηθικλμνξοπÏÏ‚ÏƒÏ„Ï…Ï†Ï‡ÏˆÏ‰ÏŠÏ‹ÏŒÏ +ÏŽÏϑϒϓϔϕϖϗϘϙϚϛϜÏϞϟϠϡϢϣϤϥϦϧϨϩϪϫϬϭϮϯϰϱϲϳϴϵϷϸϹϺϻϼϽϾϿЀÐЂЃЄЅІЇЈЉЊЋЌÐÐŽÐÐБ +ВГДЕЖЗИЙКЛМÐОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрÑтуфхцчшщъыьÑÑŽÑÑÑ‘Ñ’Ñ“ +єѕіїјљњћќÑўџѠѡѢѣѤѥѦѧѨѩѪѫѬѭѮѯѰѱѲѳѴѵѶѷѸѹѺѻѼѽѾѿҀÒÒŠÒ‹ÒŒÒÒŽÒÒÒ‘Ò’Ò“Ò”Ò•Ò–Ò—Ò˜Ò™ÒšÒ›ÒœÒ +ÒžÒŸÒ Ò¡Ò¢Ò£Ò¤Ò¥Ò¦Ò§Ò¨Ò©ÒªÒ«Ò¬Ò­Ò®Ò¯Ò°Ò±Ò²Ò³Ò´ÒµÒ¶Ò·Ò¸Ò¹ÒºÒ»Ò¼Ò½Ò¾Ò¿Ó€ÓÓ‚ÓƒÓ„Ó…Ó†Ó‡ÓˆÓ‰ÓŠÓ‹ÓŒÓÓŽÓÓ‘Ó’Ó“Ó”Ó•Ó–Ó—Ó˜Ó™ÓšÓ›ÓœÓÓžÓŸÓ  +Ó¡Ó¢Ó£Ó¤Ó¥Ó¦Ó§Ó¨Ó©ÓªÓ«Ó¬Ó­Ó®Ó¯Ó°Ó±Ó²Ó³Ó´ÓµÓ¶Ó·Ó¸Ó¹Ô€ÔÔ‚ÔƒÔ„Ô…Ô†Ô‡ÔˆÔ‰ÔŠÔ‹ÔŒÔÔŽÔÔ±Ô²Ô³Ô´ÔµÔ¶Ô·Ô¸Ô¹ÔºÔ»Ô¼Ô½Ô¾Ô¿Õ€ÕÕ‚ÕƒÕ„Õ…Õ†Õ‡ÕˆÕ‰ +ÕŠÕ‹ÕŒÕÕŽÕÕÕ‘Õ’Õ“Õ”Õ•Õ–Õ¡Õ¢Õ£Õ¤Õ¥Õ¦Õ§Õ¨Õ©ÕªÕ«Õ¬Õ­Õ®Õ¯Õ°Õ±Õ²Õ³Õ´ÕµÕ¶Õ·Õ¸Õ¹ÕºÕ»Õ¼Õ½Õ¾Õ¿Ö€ÖւփքօֆևႠႡႢႣႤႥႦႧႨႩႪႫႬႭ +ႮႯႰႱႲႳႴႵႶႷႸႹႺႻႼႽႾႿჀáƒáƒ‚ჃჄჅᴀá´á´‚ᴃᴄᴅᴆᴇᴈᴉᴊᴋᴌá´á´Žá´á´á´‘ᴒᴓᴔᴕᴖᴗᴘᴙᴚᴛᴜá´á´žá´Ÿá´ á´¡á´¢á´£á´¤á´¥á´¦á´§á´¨á´© +ᴪᴫᵢᵣᵤᵥᵦᵧᵨᵩᵪᵫᵬᵭᵮᵯᵰᵱᵲᵳᵴᵵᵶᵷᵹᵺᵻᵼᵽᵾᵿᶀá¶á¶‚ᶃᶄᶅᶆᶇᶈᶉᶊᶋᶌá¶á¶Žá¶á¶á¶‘ᶒᶓᶔᶕᶖᶗᶘᶙᶚḀá¸á¸‚ḃḄḅḆḇ +ḈḉḊḋḌá¸á¸Žá¸á¸á¸‘ḒḓḔḕḖḗḘḙḚḛḜá¸á¸žá¸Ÿá¸ á¸¡á¸¢á¸£á¸¤á¸¥á¸¦á¸§á¸¨á¸©á¸ªá¸«á¸¬á¸­á¸®á¸¯á¸°á¸±á¸²á¸³á¸´á¸µá¸¶á¸·á¸¸á¸¹á¸ºá¸»á¸¼á¸½á¸¾á¸¿á¹€á¹á¹‚ṃṄṅṆṇṈṉ +ṊṋṌá¹á¹Žá¹á¹á¹‘ṒṓṔṕṖṗṘṙṚṛṜá¹á¹žá¹Ÿá¹ á¹¡á¹¢á¹£á¹¤á¹¥á¹¦á¹§á¹¨á¹©á¹ªá¹«á¹¬á¹­á¹®á¹¯á¹°á¹±á¹²á¹³á¹´á¹µá¹¶á¹·á¹¸á¹¹á¹ºá¹»á¹¼á¹½á¹¾á¹¿áº€áºáº‚ẃẄẅẆẇẈẉẊẋ +ẌáºáºŽáºáºáº‘ẒẓẔẕẖẗẘẙẚẛẠạẢảẤấẦầẨẩẪẫẬậẮắẰằẲẳẴẵẶặẸẹẺẻẼẽẾếỀá»á»‚ểỄễỆệỈỉỊịỌá»á»Žá»á»á»‘ +ỒồỔổỖỗỘộỚớỜá»á»žá»Ÿá» á»¡á»¢á»£á»¤á»¥á»¦á»§á»¨á»©á»ªá»«á»¬á»­á»®á»¯á»°á»±á»²á»³á»´á»µá»¶á»·á»¸á»¹á¼€á¼á¼‚ἃἄἅἆἇἈἉἊἋἌá¼á¼Žá¼á¼á¼‘ἒἓἔἕἘἙἚἛ +Ἔá¼á¼ á¼¡á¼¢á¼£á¼¤á¼¥á¼¦á¼§á¼¨á¼©á¼ªá¼«á¼¬á¼­á¼®á¼¯á¼°á¼±á¼²á¼³á¼´á¼µá¼¶á¼·á¼¸á¼¹á¼ºá¼»á¼¼á¼½á¼¾á¼¿á½€á½á½‚ὃὄὅὈὉὊὋὌá½á½á½‘ὒὓὔὕὖὗὙὛá½á½Ÿá½ á½¡á½¢á½£á½¤á½¥á½¦á½§ 
+ὨὩὪὫὬὭὮὯὰάὲέὴήὶίὸόὺύὼώᾀá¾á¾‚ᾃᾄᾅᾆᾇá¾á¾‘ᾒᾓᾔᾕᾖᾗᾠᾡᾢᾣᾤᾥᾦᾧᾰᾱᾲᾳᾴᾶᾷᾸᾹᾺΆιῂῃῄῆῇῈΈῊ +á¿‹á¿á¿‘ῒΐῖῗῘῙῚΊῠῡῢΰῤῥῦῧῨῩῪΎῬῲῳῴῶῷῸΌῺΏâ±â¿â„‚ℇℊℋℌâ„â„Žâ„â„ℑℒℓℕℙℚℛℜâ„ℤΩℨKÅℬℭℯℰℱℳℴℹ +ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµºÀÃÂÃÄÅÆÇÈÉÊ +ËÌÃÃŽÃÃÑÒÓÔÕÖØÙÚÛÜÃÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿĀÄĂ㥹ĆćĈĉĊċČÄÄŽ +ÄÄđĒēĔĕĖėĘęĚěĜÄĞğĠġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀÅłŃńŅņŇňʼnŊŋŌÅÅŽÅÅ +őŒœŔŕŖŗŘřŚśŜÅŞşŠšŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſƀÆƂƃƄƅƆƇƈƉƊƋƌÆÆŽÆÆÆ‘Æ’ +ƓƔƕƖƗƘƙƚƛƜÆƞƟƠơƢƣƤƥƦƧƨƩƪƫƬƭƮƯưƱƲƳƴƵƶƷƸƹƺƼƽƾƿDŽdžLJljNJnjÇÇŽÇÇǑǒǓǔǕǖǗǘǙǚǛǜ +ÇǞǟǠǡǢǣǤǥǦǧǨǩǪǫǬǭǮǯǰDZdzǴǵǶǷǸǹǺǻǼǽǾǿȀÈȂȃȄȅȆȇȈȉȊȋȌÈÈŽÈÈȑȒȓȔȕȖȗȘșȚțȜÈȞȟ +ȠȡȢȣȤȥȦȧȨȩȪȫȬȭȮȯȰȱȲȳȴȵȶȷȸȹȺȻȼȽȾȿɀÉÉɑɒɓɔɕɖɗɘəɚɛɜÉɞɟɠɡɢɣɤɥɦɧɨɩɪɫɬɭɮɯ +ɰɱɲɳɴɵɶɷɸɹɺɻɼɽɾɿʀÊʂʃʄʅʆʇʈʉʊʋʌÊÊŽÊÊʑʒʓʔʕʖʗʘʙʚʛʜÊʞʟʠʡʢʣʤʥʦʧʨʩʪʫʬʭʮʯΆΈ +ΉΊΌΎÎÎΑΒΓΔΕΖΗΘΙΚΛΜÎΞΟΠΡΣΤΥΦΧΨΩΪΫάέήίΰαβγδεζηθικλμνξοπÏÏ‚ÏƒÏ„Ï…Ï†Ï‡ÏˆÏ‰ÏŠÏ‹ÏŒÏ +ÏŽÏϑϒϓϔϕϖϗϘϙϚϛϜÏϞϟϠϡϢϣϤϥϦϧϨϩϪϫϬϭϮϯϰϱϲϳϴϵϷϸϹϺϻϼϽϾϿЀÐЂЃЄЅІЇЈЉЊЋЌÐÐŽÐÐБ +ВГДЕЖЗИЙКЛМÐОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрÑтуфхцчшщъыьÑÑŽÑÑÑ‘Ñ’Ñ“ +єѕіїјљњћќÑўџѠѡѢѣѤѥѦѧѨѩѪѫѬѭѮѯѰѱѲѳѴѵѶѷѸѹѺѻѼѽѾѿҀÒÒŠÒ‹ÒŒÒÒŽÒÒÒ‘Ò’Ò“Ò”Ò•Ò–Ò—Ò˜Ò™ÒšÒ›ÒœÒ +ÒžÒŸÒ Ò¡Ò¢Ò£Ò¤Ò¥Ò¦Ò§Ò¨Ò©ÒªÒ«Ò¬Ò­Ò®Ò¯Ò°Ò±Ò²Ò³Ò´ÒµÒ¶Ò·Ò¸Ò¹ÒºÒ»Ò¼Ò½Ò¾Ò¿Ó€ÓÓ‚ÓƒÓ„Ó…Ó†Ó‡ÓˆÓ‰ÓŠÓ‹ÓŒÓÓŽÓÓ‘Ó’Ó“Ó”Ó•Ó–Ó—Ó˜Ó™ÓšÓ›ÓœÓÓžÓŸÓ  +Ó¡Ó¢Ó£Ó¤Ó¥Ó¦Ó§Ó¨Ó©ÓªÓ«Ó¬Ó­Ó®Ó¯Ó°Ó±Ó²Ó³Ó´ÓµÓ¶Ó·Ó¸Ó¹Ô€ÔÔ‚ÔƒÔ„Ô…Ô†Ô‡ÔˆÔ‰ÔŠÔ‹ÔŒÔÔŽÔÔ±Ô²Ô³Ô´ÔµÔ¶Ô·Ô¸Ô¹ÔºÔ»Ô¼Ô½Ô¾Ô¿Õ€ÕÕ‚ÕƒÕ„Õ…Õ†Õ‡ÕˆÕ‰ +ÕŠÕ‹ÕŒÕÕŽÕÕÕ‘Õ’Õ“Õ”Õ•Õ–Õ¡Õ¢Õ£Õ¤Õ¥Õ¦Õ§Õ¨Õ©ÕªÕ«Õ¬Õ­Õ®Õ¯Õ°Õ±Õ²Õ³Õ´ÕµÕ¶Õ·Õ¸Õ¹ÕºÕ»Õ¼Õ½Õ¾Õ¿Ö€ÖւփքօֆևႠႡႢႣႤႥႦႧႨႩႪႫႬႭ +ႮႯႰႱႲႳႴႵႶႷႸႹႺႻႼႽႾႿჀáƒáƒ‚ჃჄჅᴀá´á´‚ᴃᴄᴅᴆᴇᴈᴉᴊᴋᴌá´á´Žá´á´á´‘ᴒᴓᴔᴕᴖᴗᴘᴙᴚᴛᴜá´á´žá´Ÿá´ á´¡á´¢á´£á´¤á´¥á´¦á´§á´¨á´© +ᴪᴫᵢᵣᵤᵥᵦᵧᵨᵩᵪᵫᵬᵭᵮᵯᵰᵱᵲᵳᵴᵵᵶᵷᵹᵺᵻᵼᵽᵾᵿᶀá¶á¶‚ᶃᶄᶅᶆᶇᶈᶉᶊᶋᶌá¶á¶Žá¶á¶á¶‘ᶒᶓᶔᶕᶖᶗᶘᶙᶚḀá¸á¸‚ḃḄḅḆḇ +ḈḉḊḋḌá¸á¸Žá¸á¸á¸‘ḒḓḔḕḖḗḘḙḚḛḜá¸á¸žá¸Ÿá¸ á¸¡á¸¢á¸£á¸¤á¸¥á¸¦á¸§á¸¨á¸©á¸ªá¸«á¸¬á¸­á¸®á¸¯á¸°á¸±á¸²á¸³á¸´á¸µá¸¶á¸·á¸¸á¸¹á¸ºá¸»á¸¼á¸½á¸¾á¸¿á¹€á¹á¹‚ṃṄṅṆṇṈṉ +ṊṋṌá¹á¹Žá¹á¹á¹‘ṒṓṔṕṖṗṘṙṚṛṜá¹á¹žá¹Ÿá¹ á¹¡á¹¢á¹£á¹¤á¹¥á¹¦á¹§á¹¨á¹©á¹ªá¹«á¹¬á¹­á¹®á¹¯á¹°á¹±á¹²á¹³á¹´á¹µá¹¶á¹·á¹¸á¹¹á¹ºá¹»á¹¼á¹½á¹¾á¹¿áº€áºáº‚ẃẄẅẆẇẈẉẊẋ +ẌáºáºŽáºáºáº‘ẒẓẔẕẖẗẘẙẚẛẠạẢảẤấẦầẨẩẪẫẬậẮắẰằẲẳẴẵẶặẸẹẺẻẼẽẾếỀá»á»‚ểỄễỆệỈỉỊịỌá»á»Žá»á»á»‘ +ỒồỔổỖỗỘộỚớỜá»á»žá»Ÿá» 
á»¡á»¢á»£á»¤á»¥á»¦á»§á»¨á»©á»ªá»«á»¬á»­á»®á»¯á»°á»±á»²á»³á»´á»µá»¶á»·á»¸á»¹á¼€á¼á¼‚ἃἄἅἆἇἈἉἊἋἌá¼á¼Žá¼á¼á¼‘ἒἓἔἕἘἙἚἛ +Ἔá¼á¼ á¼¡á¼¢á¼£á¼¤á¼¥á¼¦á¼§á¼¨á¼©á¼ªá¼«á¼¬á¼­á¼®á¼¯á¼°á¼±á¼²á¼³á¼´á¼µá¼¶á¼·á¼¸á¼¹á¼ºá¼»á¼¼á¼½á¼¾á¼¿á½€á½á½‚ὃὄὅὈὉὊὋὌá½á½á½‘ὒὓὔὕὖὗὙὛá½á½Ÿá½ á½¡á½¢á½£á½¤á½¥á½¦á½§ +ὨὩὪὫὬὭὮὯὰάὲέὴήὶίὸόὺύὼώᾀá¾á¾‚ᾃᾄᾅᾆᾇá¾á¾‘ᾒᾓᾔᾕᾖᾗᾠᾡᾢᾣᾤᾥᾦᾧᾰᾱᾲᾳᾴᾶᾷᾸᾹᾺΆιῂῃῄῆῇῈΈῊ +á¿‹á¿á¿‘ῒΐῖῗῘῙῚΊῠῡῢΰῤῥῦῧῨῩῪΎῬῲῳῴῶῷῸΌῺΏâ±â¿â„‚ℇℊℋℌâ„â„Žâ„â„ℑℒℓℕℙℚℛℜâ„ℤΩℨKÅℬℭℯℰℱℳℴℹ +ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµºÀÃÂÃÄÅÆÇÈÉÊ +ËÌÃÃŽÃÃÑÒÓÔÕÖØÙÚÛÜÃÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿĀÄĂ㥹ĆćĈĉĊċČÄÄŽ +ÄÄđĒēĔĕĖėĘęĚěĜÄĞğĠġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀÅłŃńŅņŇňʼnŊŋŌÅÅŽÅÅ +őŒœŔŕŖŗŘřŚśŜÅŞşŠšŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſƀÆƂƃƄƅƆƇƈƉƊƋƌÆÆŽÆÆÆ‘Æ’ +ƓƔƕƖƗƘƙƚƛƜÆƞƟƠơƢƣƤƥƦƧƨƩƪƫƬƭƮƯưƱƲƳƴƵƶƷƸƹƺƼƽƾƿDŽdžLJljNJnjÇÇŽÇÇǑǒǓǔǕǖǗǘǙǚǛǜ +ÇǞǟǠǡǢǣǤǥǦǧǨǩǪǫǬǭǮǯǰDZdzǴǵǶǷǸǹǺǻǼǽǾǿȀÈȂȃȄȅȆȇȈȉȊȋȌÈÈŽÈÈȑȒȓȔȕȖȗȘșȚțȜÈȞȟ +ȠȡȢȣȤȥȦȧȨȩȪȫȬȭȮȯȰȱȲȳȴȵȶȷȸȹȺȻȼȽȾȿɀÉÉɑɒɓɔɕɖɗɘəɚɛɜÉɞɟɠɡɢɣɤɥɦɧɨɩɪɫɬɭɮɯ +ɰɱɲɳɴɵɶɷɸɹɺɻɼɽɾɿʀÊʂʃʄʅʆʇʈʉʊʋʌÊÊŽÊÊʑʒʓʔʕʖʗʘʙʚʛʜÊʞʟʠʡʢʣʤʥʦʧʨʩʪʫʬʭʮʯΆΈ +ΉΊΌΎÎÎΑΒΓΔΕΖΗΘΙΚΛΜÎΞΟΠΡΣΤΥΦΧΨΩΪΫάέήίΰαβγδεζηθικλμνξοπÏÏ‚ÏƒÏ„Ï…Ï†Ï‡ÏˆÏ‰ÏŠÏ‹ÏŒÏ +ÏŽÏϑϒϓϔϕϖϗϘϙϚϛϜÏϞϟϠϡϢϣϤϥϦϧϨϩϪϫϬϭϮϯϰϱϲϳϴϵϷϸϹϺϻϼϽϾϿЀÐЂЃЄЅІЇЈЉЊЋЌÐÐŽÐÐБ +ВГДЕЖЗИЙКЛМÐОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрÑтуфхцчшщъыьÑÑŽÑÑÑ‘Ñ’Ñ“ +єѕіїјљњћќÑўџѠѡѢѣѤѥѦѧѨѩѪѫѬѭѮѯѰѱѲѳѴѵѶѷѸѹѺѻѼѽѾѿҀÒÒŠÒ‹ÒŒÒÒŽÒÒÒ‘Ò’Ò“Ò”Ò•Ò–Ò—Ò˜Ò™ÒšÒ›ÒœÒ +ÒžÒŸÒ Ò¡Ò¢Ò£Ò¤Ò¥Ò¦Ò§Ò¨Ò©ÒªÒ«Ò¬Ò­Ò®Ò¯Ò°Ò±Ò²Ò³Ò´ÒµÒ¶Ò·Ò¸Ò¹ÒºÒ»Ò¼Ò½Ò¾Ò¿Ó€ÓÓ‚ÓƒÓ„Ó…Ó†Ó‡ÓˆÓ‰ÓŠÓ‹ÓŒÓÓŽÓÓ‘Ó’Ó“Ó”Ó•Ó–Ó—Ó˜Ó™ÓšÓ›ÓœÓÓžÓŸÓ  +Ó¡Ó¢Ó£Ó¤Ó¥Ó¦Ó§Ó¨Ó©ÓªÓ«Ó¬Ó­Ó®Ó¯Ó°Ó±Ó²Ó³Ó´ÓµÓ¶Ó·Ó¸Ó¹Ô€ÔÔ‚ÔƒÔ„Ô…Ô†Ô‡ÔˆÔ‰ÔŠÔ‹ÔŒÔÔŽÔÔ±Ô²Ô³Ô´ÔµÔ¶Ô·Ô¸Ô¹ÔºÔ»Ô¼Ô½Ô¾Ô¿Õ€ÕÕ‚ÕƒÕ„Õ…Õ†Õ‡ÕˆÕ‰ +ÕŠÕ‹ÕŒÕÕŽÕÕÕ‘Õ’Õ“Õ”Õ•Õ–Õ¡Õ¢Õ£Õ¤Õ¥Õ¦Õ§Õ¨Õ©ÕªÕ«Õ¬Õ­Õ®Õ¯Õ°Õ±Õ²Õ³Õ´ÕµÕ¶Õ·Õ¸Õ¹ÕºÕ»Õ¼Õ½Õ¾Õ¿Ö€ÖւփքօֆևႠႡႢႣႤႥႦႧႨႩႪႫႬႭ +ႮႯႰႱႲႳႴႵႶႷႸႹႺႻႼႽႾႿჀáƒáƒ‚ჃჄჅᴀá´á´‚ᴃᴄᴅᴆᴇᴈᴉᴊᴋᴌá´á´Žá´á´á´‘ᴒᴓᴔᴕᴖᴗᴘᴙᴚᴛᴜá´á´žá´Ÿá´ á´¡á´¢á´£á´¤á´¥á´¦á´§á´¨á´© +ᴪᴫᵢᵣᵤᵥᵦᵧᵨᵩᵪᵫᵬᵭᵮᵯᵰᵱᵲᵳᵴᵵᵶᵷᵹᵺᵻᵼᵽᵾᵿᶀá¶á¶‚ᶃᶄᶅᶆᶇᶈᶉᶊᶋᶌá¶á¶Žá¶á¶á¶‘ᶒᶓᶔᶕᶖᶗᶘᶙᶚḀá¸á¸‚ḃḄḅḆḇ +ḈḉḊḋḌá¸á¸Žá¸á¸á¸‘ḒḓḔḕḖḗḘḙḚḛḜá¸á¸žá¸Ÿá¸ 
á¸¡á¸¢á¸£á¸¤á¸¥á¸¦á¸§á¸¨á¸©á¸ªá¸«á¸¬á¸­á¸®á¸¯á¸°á¸±á¸²á¸³á¸´á¸µá¸¶á¸·á¸¸á¸¹á¸ºá¸»á¸¼á¸½á¸¾á¸¿á¹€á¹á¹‚ṃṄṅṆṇṈṉ +ṊṋṌá¹á¹Žá¹á¹á¹‘ṒṓṔṕṖṗṘṙṚṛṜá¹á¹žá¹Ÿá¹ á¹¡á¹¢á¹£á¹¤á¹¥á¹¦á¹§á¹¨á¹©á¹ªá¹«á¹¬á¹­á¹®á¹¯á¹°á¹±á¹²á¹³á¹´á¹µá¹¶á¹·á¹¸á¹¹á¹ºá¹»á¹¼á¹½á¹¾á¹¿áº€áºáº‚ẃẄẅẆẇẈẉẊẋ +ẌáºáºŽáºáºáº‘ẒẓẔẕẖẗẘẙẚẛẠạẢảẤấẦầẨẩẪẫẬậẮắẰằẲẳẴẵẶặẸẹẺẻẼẽẾếỀá»á»‚ểỄễỆệỈỉỊịỌá»á»Žá»á»á»‘ +ỒồỔổỖỗỘộỚớỜá»á»žá»Ÿá» á»¡á»¢á»£á»¤á»¥á»¦á»§á»¨á»©á»ªá»«á»¬á»­á»®á»¯á»°á»±á»²á»³á»´á»µá»¶á»·á»¸á»¹á¼€á¼á¼‚ἃἄἅἆἇἈἉἊἋἌá¼á¼Žá¼á¼á¼‘ἒἓἔἕἘἙἚἛ +Ἔá¼á¼ á¼¡á¼¢á¼£á¼¤á¼¥á¼¦á¼§á¼¨á¼©á¼ªá¼«á¼¬á¼­á¼®á¼¯á¼°á¼±á¼²á¼³á¼´á¼µá¼¶á¼·á¼¸á¼¹á¼ºá¼»á¼¼á¼½á¼¾á¼¿á½€á½á½‚ὃὄὅὈὉὊὋὌá½á½á½‘ὒὓὔὕὖὗὙὛá½á½Ÿá½ á½¡á½¢á½£á½¤á½¥á½¦á½§ +ὨὩὪὫὬὭὮὯὰάὲέὴήὶίὸόὺύὼώᾀá¾á¾‚ᾃᾄᾅᾆᾇá¾á¾‘ᾒᾓᾔᾕᾖᾗᾠᾡᾢᾣᾤᾥᾦᾧᾰᾱᾲᾳᾴᾶᾷᾸᾹᾺΆιῂῃῄῆῇῈΈῊ +á¿‹á¿á¿‘ῒΐῖῗῘῙῚΊῠῡῢΰῤῥῦῧῨῩῪΎῬῲῳῴῶῷῸΌῺΏâ±â¿â„‚ℇℊℋℌâ„â„Žâ„â„ℑℒℓℕℙℚℛℜâ„ℤΩℨKÅℬℭℯℰℱℳℴℹ +ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµºÀÃÂÃÄÅÆÇÈÉÊ +ËÌÃÃŽÃÃÑÒÓÔÕÖØÙÚÛÜÃÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿĀÄĂ㥹ĆćĈĉĊċČÄÄŽ +ÄÄđĒēĔĕĖėĘęĚěĜÄĞğĠġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀÅłŃńŅņŇňʼnŊŋŌÅÅŽÅÅ +őŒœŔŕŖŗŘřŚśŜÅŞşŠšŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſƀÆƂƃƄƅƆƇƈƉƊƋƌÆÆŽÆÆÆ‘Æ’ +ƓƔƕƖƗƘƙƚƛƜÆƞƟƠơƢƣƤƥƦƧƨƩƪƫƬƭƮƯưƱƲƳƴƵƶƷƸƹƺƼƽƾƿDŽdžLJljNJnjÇÇŽÇÇǑǒǓǔǕǖǗǘǙǚǛǜ +ÇǞǟǠǡǢǣǤǥǦǧǨǩǪǫǬǭǮǯǰDZdzǴǵǶǷǸǹǺǻǼǽǾǿȀÈȂȃȄȅȆȇȈȉȊȋȌÈÈŽÈÈȑȒȓȔȕȖȗȘșȚțȜÈȞȟ +ȠȡȢȣȤȥȦȧȨȩȪȫȬȭȮȯȰȱȲȳȴȵȶȷȸȹȺȻȼȽȾȿɀÉÉɑɒɓɔɕɖɗɘəɚɛɜÉɞɟɠɡɢɣɤɥɦɧɨɩɪɫɬɭɮɯ +ɰɱɲɳɴɵɶɷɸɹɺɻɼɽɾɿʀÊʂʃʄʅʆʇʈʉʊʋʌÊÊŽÊÊʑʒʓʔʕʖʗʘʙʚʛʜÊʞʟʠʡʢʣʤʥʦʧʨʩʪʫʬʭʮʯΆΈ +ΉΊΌΎÎÎΑΒΓΔΕΖΗΘΙΚΛΜÎΞΟΠΡΣΤΥΦΧΨΩΪΫάέήίΰαβγδεζηθικλμνξοπÏÏ‚ÏƒÏ„Ï…Ï†Ï‡ÏˆÏ‰ÏŠÏ‹ÏŒÏ +ÏŽÏϑϒϓϔϕϖϗϘϙϚϛϜÏϞϟϠϡϢϣϤϥϦϧϨϩϪϫϬϭϮϯϰϱϲϳϴϵϷϸϹϺϻϼϽϾϿЀÐЂЃЄЅІЇЈЉЊЋЌÐÐŽÐÐБ +ВГДЕЖЗИЙКЛМÐОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрÑтуфхцчшщъыьÑÑŽÑÑÑ‘Ñ’Ñ“ +єѕіїјљњћќÑўџѠѡѢѣѤѥѦѧѨѩѪѫѬѭѮѯѰѱѲѳѴѵѶѷѸѹѺѻѼѽѾѿҀÒÒŠÒ‹ÒŒÒÒŽÒÒÒ‘Ò’Ò“Ò”Ò•Ò–Ò—Ò˜Ò™ÒšÒ›ÒœÒ +ÒžÒŸÒ Ò¡Ò¢Ò£Ò¤Ò¥Ò¦Ò§Ò¨Ò©ÒªÒ«Ò¬Ò­Ò®Ò¯Ò°Ò±Ò²Ò³Ò´ÒµÒ¶Ò·Ò¸Ò¹ÒºÒ»Ò¼Ò½Ò¾Ò¿Ó€ÓÓ‚ÓƒÓ„Ó…Ó†Ó‡ÓˆÓ‰ÓŠÓ‹ÓŒÓÓŽÓÓ‘Ó’Ó“Ó”Ó•Ó–Ó—Ó˜Ó™ÓšÓ›ÓœÓÓžÓŸÓ  +Ó¡Ó¢Ó£Ó¤Ó¥Ó¦Ó§Ó¨Ó©ÓªÓ«Ó¬Ó­Ó®Ó¯Ó°Ó±Ó²Ó³Ó´ÓµÓ¶Ó·Ó¸Ó¹Ô€ÔÔ‚ÔƒÔ„Ô…Ô†Ô‡ÔˆÔ‰ÔŠÔ‹ÔŒÔÔŽÔÔ±Ô²Ô³Ô´ÔµÔ¶Ô·Ô¸Ô¹ÔºÔ»Ô¼Ô½Ô¾Ô¿Õ€ÕÕ‚ÕƒÕ„Õ…Õ†Õ‡ÕˆÕ‰ 
+ÕŠÕ‹ÕŒÕÕŽÕÕÕ‘Õ’Õ“Õ”Õ•Õ–Õ¡Õ¢Õ£Õ¤Õ¥Õ¦Õ§Õ¨Õ©ÕªÕ«Õ¬Õ­Õ®Õ¯Õ°Õ±Õ²Õ³Õ´ÕµÕ¶Õ·Õ¸Õ¹ÕºÕ»Õ¼Õ½Õ¾Õ¿Ö€ÖւփքօֆևႠႡႢႣႤႥႦႧႨႩႪႫႬႭ +ႮႯႰႱႲႳႴႵႶႷႸႹႺႻႼႽႾႿჀáƒáƒ‚ჃჄჅᴀá´á´‚ᴃᴄᴅᴆᴇᴈᴉᴊᴋᴌá´á´Žá´á´á´‘ᴒᴓᴔᴕᴖᴗᴘᴙᴚᴛᴜá´á´žá´Ÿá´ á´¡á´¢á´£á´¤á´¥á´¦á´§á´¨á´© +ᴪᴫᵢᵣᵤᵥᵦᵧᵨᵩᵪᵫᵬᵭᵮᵯᵰᵱᵲᵳᵴᵵᵶᵷᵹᵺᵻᵼᵽᵾᵿᶀá¶á¶‚ᶃᶄᶅᶆᶇᶈᶉᶊᶋᶌá¶á¶Žá¶á¶á¶‘ᶒᶓᶔᶕᶖᶗᶘᶙᶚḀá¸á¸‚ḃḄḅḆḇ +ḈḉḊḋḌá¸á¸Žá¸á¸á¸‘ḒḓḔḕḖḗḘḙḚḛḜá¸á¸žá¸Ÿá¸ á¸¡á¸¢á¸£á¸¤á¸¥á¸¦á¸§á¸¨á¸©á¸ªá¸«á¸¬á¸­á¸®á¸¯á¸°á¸±á¸²á¸³á¸´á¸µá¸¶á¸·á¸¸á¸¹á¸ºá¸»á¸¼á¸½á¸¾á¸¿á¹€á¹á¹‚ṃṄṅṆṇṈṉ +ṊṋṌá¹á¹Žá¹á¹á¹‘ṒṓṔṕṖṗṘṙṚṛṜá¹á¹žá¹Ÿá¹ á¹¡á¹¢á¹£á¹¤á¹¥á¹¦á¹§á¹¨á¹©á¹ªá¹«á¹¬á¹­á¹®á¹¯á¹°á¹±á¹²á¹³á¹´á¹µá¹¶á¹·á¹¸á¹¹á¹ºá¹»á¹¼á¹½á¹¾á¹¿áº€áºáº‚ẃẄẅẆẇẈẉẊẋ +ẌáºáºŽáºáºáº‘ẒẓẔẕẖẗẘẙẚẛẠạẢảẤấẦầẨẩẪẫẬậẮắẰằẲẳẴẵẶặẸẹẺẻẼẽẾếỀá»á»‚ểỄễỆệỈỉỊịỌá»á»Žá»á»á»‘ +ỒồỔổỖỗỘộỚớỜá»á»žá»Ÿá» á»¡á»¢á»£á»¤á»¥á»¦á»§á»¨á»©á»ªá»«á»¬á»­á»®á»¯á»°á»±á»²á»³á»´á»µá»¶á»·á»¸á»¹á¼€á¼á¼‚ἃἄἅἆἇἈἉἊἋἌá¼á¼Žá¼á¼á¼‘ἒἓἔἕἘἙἚἛ +Ἔá¼á¼ á¼¡á¼¢á¼£á¼¤á¼¥á¼¦á¼§á¼¨á¼©á¼ªá¼«á¼¬á¼­á¼®á¼¯á¼°á¼±á¼²á¼³á¼´á¼µá¼¶á¼·á¼¸á¼¹á¼ºá¼»á¼¼á¼½á¼¾á¼¿á½€á½á½‚ὃὄὅὈὉὊὋὌá½á½á½‘ὒὓὔὕὖὗὙὛá½á½Ÿá½ á½¡á½¢á½£á½¤á½¥á½¦á½§ +ὨὩὪὫὬὭὮὯὰάὲέὴήὶίὸόὺύὼώᾀá¾á¾‚ᾃᾄᾅᾆᾇá¾á¾‘ᾒᾓᾔᾕᾖᾗᾠᾡᾢᾣᾤᾥᾦᾧᾰᾱᾲᾳᾴᾶᾷᾸᾹᾺΆιῂῃῄῆῇῈΈῊ +á¿‹á¿á¿‘ῒΐῖῗῘῙῚΊῠῡῢΰῤῥῦῧῨῩῪΎῬῲῳῴῶῷῸΌῺΏâ±â¿â„‚ℇℊℋℌâ„â„Žâ„â„ℑℒℓℕℙℚℛℜâ„ℤΩℨKÅℬℭℯℰℱℳℴℹ +ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµºÀÃÂÃÄÅÆÇÈÉÊ +ËÌÃÃŽÃÃÑÒÓÔÕÖØÙÚÛÜÃÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿĀÄĂ㥹ĆćĈĉĊċČÄÄŽ +ÄÄđĒēĔĕĖėĘęĚěĜÄĞğĠġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀÅłŃńŅņŇňʼnŊŋŌÅÅŽÅÅ +őŒœŔŕŖŗŘřŚśŜÅŞşŠšŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſƀÆƂƃƄƅƆƇƈƉƊƋƌÆÆŽÆÆÆ‘Æ’ +ƓƔƕƖƗƘƙƚƛƜÆƞƟƠơƢƣƤƥƦƧƨƩƪƫƬƭƮƯưƱƲƳƴƵƶƷƸƹƺƼƽƾƿDŽdžLJljNJnjÇÇŽÇÇǑǒǓǔǕǖǗǘǙǚǛǜ +ÇǞǟǠǡǢǣǤǥǦǧǨǩǪǫǬǭǮǯǰDZdzǴǵǶǷǸǹǺǻǼǽǾǿȀÈȂȃȄȅȆȇȈȉȊȋȌÈÈŽÈÈȑȒȓȔȕȖȗȘșȚțȜÈȞȟ +ȠȡȢȣȤȥȦȧȨȩȪȫȬȭȮȯȰȱȲȳȴȵȶȷȸȹȺȻȼȽȾȿɀÉÉɑɒɓɔɕɖɗɘəɚɛɜÉɞɟɠɡɢɣɤɥɦɧɨɩɪɫɬɭɮɯ +ɰɱɲɳɴɵɶɷɸɹɺɻɼɽɾɿʀÊʂʃʄʅʆʇʈʉʊʋʌÊÊŽÊÊʑʒʓʔʕʖʗʘʙʚʛʜÊʞʟʠʡʢʣʤʥʦʧʨʩʪʫʬʭʮʯΆΈ +ΉΊΌΎÎÎΑΒΓΔΕΖΗΘΙΚΛΜÎΞΟΠΡΣΤΥΦΧΨΩΪΫάέήίΰαβγδεζηθικλμνξοπÏÏ‚ÏƒÏ„Ï…Ï†Ï‡ÏˆÏ‰ÏŠÏ‹ÏŒÏ +ÏŽÏϑϒϓϔϕϖϗϘϙϚϛϜÏϞϟϠϡϢϣϤϥϦϧϨϩϪϫϬϭϮϯϰϱϲϳϴϵϷϸϹϺϻϼϽϾϿЀÐЂЃЄЅІЇЈЉЊЋЌÐÐŽÐÐБ +ВГДЕЖЗИЙКЛМÐОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрÑтуфхцчшщъыьÑÑŽÑÑÑ‘Ñ’Ñ“ 
+єѕіїјљњћќÑўџѠѡѢѣѤѥѦѧѨѩѪѫѬѭѮѯѰѱѲѳѴѵѶѷѸѹѺѻѼѽѾѿҀÒÒŠÒ‹ÒŒÒÒŽÒÒÒ‘Ò’Ò“Ò”Ò•Ò–Ò—Ò˜Ò™ÒšÒ›ÒœÒ +ÒžÒŸÒ Ò¡Ò¢Ò£Ò¤Ò¥Ò¦Ò§Ò¨Ò©ÒªÒ«Ò¬Ò­Ò®Ò¯Ò°Ò±Ò²Ò³Ò´ÒµÒ¶Ò·Ò¸Ò¹ÒºÒ»Ò¼Ò½Ò¾Ò¿Ó€ÓÓ‚ÓƒÓ„Ó…Ó†Ó‡ÓˆÓ‰ÓŠÓ‹ÓŒÓÓŽÓÓ‘Ó’Ó“Ó”Ó•Ó–Ó—Ó˜Ó™ÓšÓ›ÓœÓÓžÓŸÓ  +Ó¡Ó¢Ó£Ó¤Ó¥Ó¦Ó§Ó¨Ó©ÓªÓ«Ó¬Ó­Ó®Ó¯Ó°Ó±Ó²Ó³Ó´ÓµÓ¶Ó·Ó¸Ó¹Ô€ÔÔ‚ÔƒÔ„Ô…Ô†Ô‡ÔˆÔ‰ÔŠÔ‹ÔŒÔÔŽÔÔ±Ô²Ô³Ô´ÔµÔ¶Ô·Ô¸Ô¹ÔºÔ»Ô¼Ô½Ô¾Ô¿Õ€ÕÕ‚ÕƒÕ„Õ…Õ†Õ‡ÕˆÕ‰ +ÕŠÕ‹ÕŒÕÕŽÕÕÕ‘Õ’Õ“Õ”Õ•Õ–Õ¡Õ¢Õ£Õ¤Õ¥Õ¦Õ§Õ¨Õ©ÕªÕ«Õ¬Õ­Õ®Õ¯Õ°Õ±Õ²Õ³Õ´ÕµÕ¶Õ·Õ¸Õ¹ÕºÕ»Õ¼Õ½Õ¾Õ¿Ö€ÖւփքօֆևႠႡႢႣႤႥႦႧႨႩႪႫႬႭ +ႮႯႰႱႲႳႴႵႶႷႸႹႺႻႼႽႾႿჀáƒáƒ‚ჃჄჅᴀá´á´‚ᴃᴄᴅᴆᴇᴈᴉᴊᴋᴌá´á´Žá´á´á´‘ᴒᴓᴔᴕᴖᴗᴘᴙᴚᴛᴜá´á´žá´Ÿá´ á´¡á´¢á´£á´¤á´¥á´¦á´§á´¨á´© +ᴪᴫᵢᵣᵤᵥᵦᵧᵨᵩᵪᵫᵬᵭᵮᵯᵰᵱᵲᵳᵴᵵᵶᵷᵹᵺᵻᵼᵽᵾᵿᶀá¶á¶‚ᶃᶄᶅᶆᶇᶈᶉᶊᶋᶌá¶á¶Žá¶á¶á¶‘ᶒᶓᶔᶕᶖᶗᶘᶙᶚḀá¸á¸‚ḃḄḅḆḇ +ḈḉḊḋḌá¸á¸Žá¸á¸á¸‘ḒḓḔḕḖḗḘḙḚḛḜá¸á¸žá¸Ÿá¸ á¸¡á¸¢á¸£á¸¤á¸¥á¸¦á¸§á¸¨á¸©á¸ªá¸«á¸¬á¸­á¸®á¸¯á¸°á¸±á¸²á¸³á¸´á¸µá¸¶á¸·á¸¸á¸¹á¸ºá¸»á¸¼á¸½á¸¾á¸¿á¹€á¹á¹‚ṃṄṅṆṇṈṉ +ṊṋṌá¹á¹Žá¹á¹á¹‘ṒṓṔṕṖṗṘṙṚṛṜá¹á¹žá¹Ÿá¹ á¹¡á¹¢á¹£á¹¤á¹¥á¹¦á¹§á¹¨á¹©á¹ªá¹«á¹¬á¹­á¹®á¹¯á¹°á¹±á¹²á¹³á¹´á¹µá¹¶á¹·á¹¸á¹¹á¹ºá¹»á¹¼á¹½á¹¾á¹¿áº€áºáº‚ẃẄẅẆẇẈẉẊẋ +ẌáºáºŽáºáºáº‘ẒẓẔẕẖẗẘẙẚẛẠạẢảẤấẦầẨẩẪẫẬậẮắẰằẲẳẴẵẶặẸẹẺẻẼẽẾếỀá»á»‚ểỄễỆệỈỉỊịỌá»á»Žá»á»á»‘ +ỒồỔổỖỗỘộỚớỜá»á»žá»Ÿá» á»¡á»¢á»£á»¤á»¥á»¦á»§á»¨á»©á»ªá»«á»¬á»­á»®á»¯á»°á»±á»²á»³á»´á»µá»¶á»·á»¸á»¹á¼€á¼á¼‚ἃἄἅἆἇἈἉἊἋἌá¼á¼Žá¼á¼á¼‘ἒἓἔἕἘἙἚἛ +Ἔá¼á¼ á¼¡á¼¢á¼£á¼¤á¼¥á¼¦á¼§á¼¨á¼©á¼ªá¼«á¼¬á¼­á¼®á¼¯á¼°á¼±á¼²á¼³á¼´á¼µá¼¶á¼·á¼¸á¼¹á¼ºá¼»á¼¼á¼½á¼¾á¼¿á½€á½á½‚ὃὄὅὈὉὊὋὌá½á½á½‘ὒὓὔὕὖὗὙὛá½á½Ÿá½ á½¡á½¢á½£á½¤á½¥á½¦á½§ +ὨὩὪὫὬὭὮὯὰάὲέὴήὶίὸόὺύὼώᾀá¾á¾‚ᾃᾄᾅᾆᾇá¾á¾‘ᾒᾓᾔᾕᾖᾗᾠᾡᾢᾣᾤᾥᾦᾧᾰᾱᾲᾳᾴᾶᾷᾸᾹᾺΆιῂῃῄῆῇῈΈῊ +á¿‹á¿á¿‘ῒΐῖῗῘῙῚΊῠῡῢΰῤῥῦῧῨῩῪΎῬῲῳῴῶῷῸΌῺΏâ±â¿â„‚ℇℊℋℌâ„â„Žâ„â„ℑℒℓℕℙℚℛℜâ„ℤΩℨKÅℬℭℯℰℱℳℴℹ +ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµºÀÃÂÃÄÅÆÇÈÉÊ +ËÌÃÃŽÃÃÑÒÓÔÕÖØÙÚÛÜÃÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿĀÄĂ㥹ĆćĈĉĊċČÄÄŽ +ÄÄđĒēĔĕĖėĘęĚěĜÄĞğĠġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀÅłŃńŅņŇňʼnŊŋŌÅÅŽÅÅ +őŒœŔŕŖŗŘřŚśŜÅŞşŠšŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſƀÆƂƃƄƅƆƇƈƉƊƋƌÆÆŽÆÆÆ‘Æ’ +ƓƔƕƖƗƘƙƚƛƜÆƞƟƠơƢƣƤƥƦƧƨƩƪƫƬƭƮƯưƱƲƳƴƵƶƷƸƹƺƼƽƾƿDŽdžLJljNJnjÇÇŽÇÇǑǒǓǔǕǖǗǘǙǚǛǜ +ÇǞǟǠǡǢǣǤǥǦǧǨǩǪǫǬǭǮǯǰDZdzǴǵǶǷǸǹǺǻǼǽǾǿȀÈȂȃȄȅȆȇȈȉȊȋȌÈÈŽÈÈȑȒȓȔȕȖȗȘșȚțȜÈȞȟ +ȠȡȢȣȤȥȦȧȨȩȪȫȬȭȮȯȰȱȲȳȴȵȶȷȸȹȺȻȼȽȾȿɀÉÉɑɒɓɔɕɖɗɘəɚɛɜÉɞɟɠɡɢɣɤɥɦɧɨɩɪɫɬɭɮɯ 
+ɰɱɲɳɴɵɶɷɸɹɺɻɼɽɾɿʀÊʂʃʄʅʆʇʈʉʊʋʌÊÊŽÊÊʑʒʓʔʕʖʗʘʙʚʛʜÊʞʟʠʡʢʣʤʥʦʧʨʩʪʫʬʭʮʯΆΈ +ΉΊΌΎÎÎΑΒΓΔΕΖΗΘΙΚΛΜÎΞΟΠΡΣΤΥΦΧΨΩΪΫάέήίΰαβγδεζηθικλμνξοπÏÏ‚ÏƒÏ„Ï…Ï†Ï‡ÏˆÏ‰ÏŠÏ‹ÏŒÏ +ÏŽÏϑϒϓϔϕϖϗϘϙϚϛϜÏϞϟϠϡϢϣϤϥϦϧϨϩϪϫϬϭϮϯϰϱϲϳϴϵϷϸϹϺϻϼϽϾϿЀÐЂЃЄЅІЇЈЉЊЋЌÐÐŽÐÐБ +ВГДЕЖЗИЙКЛМÐОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрÑтуфхцчшщъыьÑÑŽÑÑÑ‘Ñ’Ñ“ +єѕіїјљњћќÑўџѠѡѢѣѤѥѦѧѨѩѪѫѬѭѮѯѰѱѲѳѴѵѶѷѸѹѺѻѼѽѾѿҀÒÒŠÒ‹ÒŒÒÒŽÒÒÒ‘Ò’Ò“Ò”Ò•Ò–Ò—Ò˜Ò™ÒšÒ›ÒœÒ +ÒžÒŸÒ Ò¡Ò¢Ò£Ò¤Ò¥Ò¦Ò§Ò¨Ò©ÒªÒ«Ò¬Ò­Ò®Ò¯Ò°Ò±Ò²Ò³Ò´ÒµÒ¶Ò·Ò¸Ò¹ÒºÒ»Ò¼Ò½Ò¾Ò¿Ó€ÓÓ‚ÓƒÓ„Ó…Ó†Ó‡ÓˆÓ‰ÓŠÓ‹ÓŒÓÓŽÓÓ‘Ó’Ó“Ó”Ó•Ó–Ó—Ó˜Ó™ÓšÓ›ÓœÓÓžÓŸÓ  +Ó¡Ó¢Ó£Ó¤Ó¥Ó¦Ó§Ó¨Ó©ÓªÓ«Ó¬Ó­Ó®Ó¯Ó°Ó±Ó²Ó³Ó´ÓµÓ¶Ó·Ó¸Ó¹Ô€ÔÔ‚ÔƒÔ„Ô…Ô†Ô‡ÔˆÔ‰ÔŠÔ‹ÔŒÔÔŽÔÔ±Ô²Ô³Ô´ÔµÔ¶Ô·Ô¸Ô¹ÔºÔ»Ô¼Ô½Ô¾Ô¿Õ€ÕÕ‚ÕƒÕ„Õ…Õ†Õ‡ÕˆÕ‰ +ÕŠÕ‹ÕŒÕÕŽÕÕÕ‘Õ’Õ“Õ”Õ•Õ–Õ¡Õ¢Õ£Õ¤Õ¥Õ¦Õ§Õ¨Õ©ÕªÕ«Õ¬Õ­Õ®Õ¯Õ°Õ±Õ²Õ³Õ´ÕµÕ¶Õ·Õ¸Õ¹ÕºÕ»Õ¼Õ½Õ¾Õ¿Ö€ÖւփքօֆևႠႡႢႣႤႥႦႧႨႩႪႫႬႭ +ႮႯႰႱႲႳႴႵႶႷႸႹႺႻႼႽႾႿჀáƒáƒ‚ჃჄჅᴀá´á´‚ᴃᴄᴅᴆᴇᴈᴉᴊᴋᴌá´á´Žá´á´á´‘ᴒᴓᴔᴕᴖᴗᴘᴙᴚᴛᴜá´á´žá´Ÿá´ á´¡á´¢á´£á´¤á´¥á´¦á´§á´¨á´© +ᴪᴫᵢᵣᵤᵥᵦᵧᵨᵩᵪᵫᵬᵭᵮᵯᵰᵱᵲᵳᵴᵵᵶᵷᵹᵺᵻᵼᵽᵾᵿᶀá¶á¶‚ᶃᶄᶅᶆᶇᶈᶉᶊᶋᶌá¶á¶Žá¶á¶á¶‘ᶒᶓᶔᶕᶖᶗᶘᶙᶚḀá¸á¸‚ḃḄḅḆḇ +ḈḉḊḋḌá¸á¸Žá¸á¸á¸‘ḒḓḔḕḖḗḘḙḚḛḜá¸á¸žá¸Ÿá¸ á¸¡á¸¢á¸£á¸¤á¸¥á¸¦á¸§á¸¨á¸©á¸ªá¸«á¸¬á¸­á¸®á¸¯á¸°á¸±á¸²á¸³á¸´á¸µá¸¶á¸·á¸¸á¸¹á¸ºá¸»á¸¼á¸½á¸¾á¸¿á¹€á¹á¹‚ṃṄṅṆṇṈṉ +ṊṋṌá¹á¹Žá¹á¹á¹‘ṒṓṔṕṖṗṘṙṚṛṜá¹á¹žá¹Ÿá¹ á¹¡á¹¢á¹£á¹¤á¹¥á¹¦á¹§á¹¨á¹©á¹ªá¹«á¹¬á¹­á¹®á¹¯á¹°á¹±á¹²á¹³á¹´á¹µá¹¶á¹·á¹¸á¹¹á¹ºá¹»á¹¼á¹½á¹¾á¹¿áº€áºáº‚ẃẄẅẆẇẈẉẊẋ +ẌáºáºŽáºáºáº‘ẒẓẔẕẖẗẘẙẚẛẠạẢảẤấẦầẨẩẪẫẬậẮắẰằẲẳẴẵẶặẸẹẺẻẼẽẾếỀá»á»‚ểỄễỆệỈỉỊịỌá»á»Žá»á»á»‘ +ỒồỔổỖỗỘộỚớỜá»á»žá»Ÿá» á»¡á»¢á»£á»¤á»¥á»¦á»§á»¨á»©á»ªá»«á»¬á»­á»®á»¯á»°á»±á»²á»³á»´á»µá»¶á»·á»¸á»¹á¼€á¼á¼‚ἃἄἅἆἇἈἉἊἋἌá¼á¼Žá¼á¼á¼‘ἒἓἔἕἘἙἚἛ +Ἔá¼á¼ á¼¡á¼¢á¼£á¼¤á¼¥á¼¦á¼§á¼¨á¼©á¼ªá¼«á¼¬á¼­á¼®á¼¯á¼°á¼±á¼²á¼³á¼´á¼µá¼¶á¼·á¼¸á¼¹á¼ºá¼»á¼¼á¼½á¼¾á¼¿á½€á½á½‚ὃὄὅὈὉὊὋὌá½á½á½‘ὒὓὔὕὖὗὙὛá½á½Ÿá½ á½¡á½¢á½£á½¤á½¥á½¦á½§ +ὨὩὪὫὬὭὮὯὰάὲέὴήὶίὸόὺύὼώᾀá¾á¾‚ᾃᾄᾅᾆᾇá¾á¾‘ᾒᾓᾔᾕᾖᾗᾠᾡᾢᾣᾤᾥᾦᾧᾰᾱᾲᾳᾴᾶᾷᾸᾹᾺΆιῂῃῄῆῇῈΈῊ +á¿‹á¿á¿‘ῒΐῖῗῘῙῚΊῠῡῢΰῤῥῦῧῨῩῪΎῬῲῳῴῶῷῸΌῺΏâ±â¿â„‚ℇℊℋℌâ„â„Žâ„â„ℑℒℓℕℙℚℛℜâ„ℤΩℨKÅℬℭℯℰℱℳℴℹ +ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµºÀÃÂÃÄÅÆÇÈÉÊ +ËÌÃÃŽÃÃÑÒÓÔÕÖØÙÚÛÜÃÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿĀÄĂ㥹ĆćĈĉĊċČÄÄŽ +ÄÄđĒēĔĕĖėĘęĚěĜÄĞğĠġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀÅłŃńŅņŇňʼnŊŋŌÅÅŽÅÅ 
+őŒœŔŕŖŗŘřŚśŜÅŞşŠšŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſƀÆƂƃƄƅƆƇƈƉƊƋƌÆÆŽÆÆÆ‘Æ’ +ƓƔƕƖƗƘƙƚƛƜÆƞƟƠơƢƣƤƥƦƧƨƩƪƫƬƭƮƯưƱƲƳƴƵƶƷƸƹƺƼƽƾƿDŽdžLJljNJnjÇÇŽÇÇǑǒǓǔǕǖǗǘǙǚǛǜ +ÇǞǟǠǡǢǣǤǥǦǧǨǩǪǫǬǭǮǯǰDZdzǴǵǶǷǸǹǺǻǼǽǾǿȀÈȂȃȄȅȆȇȈȉȊȋȌÈÈŽÈÈȑȒȓȔȕȖȗȘșȚțȜÈȞȟ +ȠȡȢȣȤȥȦȧȨȩȪȫȬȭȮȯȰȱȲȳȴȵȶȷȸȹȺȻȼȽȾȿɀÉÉɑɒɓɔɕɖɗɘəɚɛɜÉɞɟɠɡɢɣɤɥɦɧɨɩɪɫɬɭɮɯ +ɰɱɲɳɴɵɶɷɸɹɺɻɼɽɾɿʀÊʂʃʄʅʆʇʈʉʊʋʌÊÊŽÊÊʑʒʓʔʕʖʗʘʙʚʛʜÊʞʟʠʡʢʣʤʥʦʧʨʩʪʫʬʭʮʯΆΈ +ΉΊΌΎÎÎΑΒΓΔΕΖΗΘΙΚΛΜÎΞΟΠΡΣΤΥΦΧΨΩΪΫάέήίΰαβγδεζηθικλμνξοπÏÏ‚ÏƒÏ„Ï…Ï†Ï‡ÏˆÏ‰ÏŠÏ‹ÏŒÏ +ÏŽÏϑϒϓϔϕϖϗϘϙϚϛϜÏϞϟϠϡϢϣϤϥϦϧϨϩϪϫϬϭϮϯϰϱϲϳϴϵϷϸϹϺϻϼϽϾϿЀÐЂЃЄЅІЇЈЉЊЋЌÐÐŽÐÐБ +ВГДЕЖЗИЙКЛМÐОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрÑтуфхцчшщъыьÑÑŽÑÑÑ‘Ñ’Ñ“ +єѕіїјљњћќÑўџѠѡѢѣѤѥѦѧѨѩѪѫѬѭѮѯѰѱѲѳѴѵѶѷѸѹѺѻѼѽѾѿҀÒÒŠÒ‹ÒŒÒÒŽÒÒÒ‘Ò’Ò“Ò”Ò•Ò–Ò—Ò˜Ò™ÒšÒ›ÒœÒ +ÒžÒŸÒ Ò¡Ò¢Ò£Ò¤Ò¥Ò¦Ò§Ò¨Ò©ÒªÒ«Ò¬Ò­Ò®Ò¯Ò°Ò±Ò²Ò³Ò´ÒµÒ¶Ò·Ò¸Ò¹ÒºÒ»Ò¼Ò½Ò¾Ò¿Ó€ÓÓ‚ÓƒÓ„Ó…Ó†Ó‡ÓˆÓ‰ÓŠÓ‹ÓŒÓÓŽÓÓ‘Ó’Ó“Ó”Ó•Ó–Ó—Ó˜Ó™ÓšÓ›ÓœÓÓžÓŸÓ  +Ó¡Ó¢Ó£Ó¤Ó¥Ó¦Ó§Ó¨Ó©ÓªÓ«Ó¬Ó­Ó®Ó¯Ó°Ó±Ó²Ó³Ó´ÓµÓ¶Ó·Ó¸Ó¹Ô€ÔÔ‚ÔƒÔ„Ô…Ô†Ô‡ÔˆÔ‰ÔŠÔ‹ÔŒÔÔŽÔÔ±Ô²Ô³Ô´ÔµÔ¶Ô·Ô¸Ô¹ÔºÔ»Ô¼Ô½Ô¾Ô¿Õ€ÕÕ‚ÕƒÕ„Õ…Õ†Õ‡ÕˆÕ‰ +ÕŠÕ‹ÕŒÕÕŽÕÕÕ‘Õ’Õ“Õ”Õ•Õ–Õ¡Õ¢Õ£Õ¤Õ¥Õ¦Õ§Õ¨Õ©ÕªÕ«Õ¬Õ­Õ®Õ¯Õ°Õ±Õ²Õ³Õ´ÕµÕ¶Õ·Õ¸Õ¹ÕºÕ»Õ¼Õ½Õ¾Õ¿Ö€ÖւփքօֆևႠႡႢႣႤႥႦႧႨႩႪႫႬႭ +ႮႯႰႱႲႳႴႵႶႷႸႹႺႻႼႽႾႿჀáƒáƒ‚ჃჄჅᴀá´á´‚ᴃᴄᴅᴆᴇᴈᴉᴊᴋᴌá´á´Žá´á´á´‘ᴒᴓᴔᴕᴖᴗᴘᴙᴚᴛᴜá´á´žá´Ÿá´ á´¡á´¢á´£á´¤á´¥á´¦á´§á´¨á´© +ᴪᴫᵢᵣᵤᵥᵦᵧᵨᵩᵪᵫᵬᵭᵮᵯᵰᵱᵲᵳᵴᵵᵶᵷᵹᵺᵻᵼᵽᵾᵿᶀá¶á¶‚ᶃᶄᶅᶆᶇᶈᶉᶊᶋᶌá¶á¶Žá¶á¶á¶‘ᶒᶓᶔᶕᶖᶗᶘᶙᶚḀá¸á¸‚ḃḄḅḆḇ +ḈḉḊḋḌá¸á¸Žá¸á¸á¸‘ḒḓḔḕḖḗḘḙḚḛḜá¸á¸žá¸Ÿá¸ á¸¡á¸¢á¸£á¸¤á¸¥á¸¦á¸§á¸¨á¸©á¸ªá¸«á¸¬á¸­á¸®á¸¯á¸°á¸±á¸²á¸³á¸´á¸µá¸¶á¸·á¸¸á¸¹á¸ºá¸»á¸¼á¸½á¸¾á¸¿á¹€á¹á¹‚ṃṄṅṆṇṈṉ +ṊṋṌá¹á¹Žá¹á¹á¹‘ṒṓṔṕṖṗṘṙṚṛṜá¹á¹žá¹Ÿá¹ á¹¡á¹¢á¹£á¹¤á¹¥á¹¦á¹§á¹¨á¹©á¹ªá¹«á¹¬á¹­á¹®á¹¯á¹°á¹±á¹²á¹³á¹´á¹µá¹¶á¹·á¹¸á¹¹á¹ºá¹»á¹¼á¹½á¹¾á¹¿áº€áºáº‚ẃẄẅẆẇẈẉẊẋ +ẌáºáºŽáºáºáº‘ẒẓẔẕẖẗẘẙẚẛẠạẢảẤấẦầẨẩẪẫẬậẮắẰằẲẳẴẵẶặẸẹẺẻẼẽẾếỀá»á»‚ểỄễỆệỈỉỊịỌá»á»Žá»á»á»‘ +ỒồỔổỖỗỘộỚớỜá»á»žá»Ÿá» á»¡á»¢á»£á»¤á»¥á»¦á»§á»¨á»©á»ªá»«á»¬á»­á»®á»¯á»°á»±á»²á»³á»´á»µá»¶á»·á»¸á»¹á¼€á¼á¼‚ἃἄἅἆἇἈἉἊἋἌá¼á¼Žá¼á¼á¼‘ἒἓἔἕἘἙἚἛ +Ἔá¼á¼ á¼¡á¼¢á¼£á¼¤á¼¥á¼¦á¼§á¼¨á¼©á¼ªá¼«á¼¬á¼­á¼®á¼¯á¼°á¼±á¼²á¼³á¼´á¼µá¼¶á¼·á¼¸á¼¹á¼ºá¼»á¼¼á¼½á¼¾á¼¿á½€á½á½‚ὃὄὅὈὉὊὋὌá½á½á½‘ὒὓὔὕὖὗὙὛá½á½Ÿá½ á½¡á½¢á½£á½¤á½¥á½¦á½§ +ὨὩὪὫὬὭὮὯὰάὲέὴήὶίὸόὺύὼώᾀá¾á¾‚ᾃᾄᾅᾆᾇá¾á¾‘ᾒᾓᾔᾕᾖᾗᾠᾡᾢᾣᾤᾥᾦᾧᾰᾱᾲᾳᾴᾶᾷᾸᾹᾺΆιῂῃῄῆῇῈΈῊ 
+á¿‹á¿á¿‘ῒΐῖῗῘῙῚΊῠῡῢΰῤῥῦῧῨῩῪΎῬῲῳῴῶῷῸΌῺΏâ±â¿â„‚ℇℊℋℌâ„â„Žâ„â„ℑℒℓℕℙℚℛℜâ„ℤΩℨKÅℬℭℯℰℱℳℴℹ +ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµºÀÃÂÃÄÅÆÇÈÉÊ +ËÌÃÃŽÃÃÑÒÓÔÕÖØÙÚÛÜÃÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿĀÄĂ㥹ĆćĈĉĊċČÄÄŽ +ÄÄđĒēĔĕĖėĘęĚěĜÄĞğĠġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀÅłŃńŅņŇňʼnŊŋŌÅÅŽÅÅ +őŒœŔŕŖŗŘřŚśŜÅŞşŠšŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſƀÆƂƃƄƅƆƇƈƉƊƋƌÆÆŽÆÆÆ‘Æ’ +ƓƔƕƖƗƘƙƚƛƜÆƞƟƠơƢƣƤƥƦƧƨƩƪƫƬƭƮƯưƱƲƳƴƵƶƷƸƹƺƼƽƾƿDŽdžLJljNJnjÇÇŽÇÇǑǒǓǔǕǖǗǘǙǚǛǜ +ÇǞǟǠǡǢǣǤǥǦǧǨǩǪǫǬǭǮǯǰDZdzǴǵǶǷǸǹǺǻǼǽǾǿȀÈȂȃȄȅȆȇȈȉȊȋȌÈÈŽÈÈȑȒȓȔȕȖȗȘșȚțȜÈȞȟ +ȠȡȢȣȤȥȦȧȨȩȪȫȬȭȮȯȰȱȲȳȴȵȶȷȸȹȺȻȼȽȾȿɀÉÉɑɒɓɔɕɖɗɘəɚɛɜÉɞɟɠɡɢɣɤɥɦɧɨɩɪɫɬɭɮɯ +ɰɱɲɳɴɵɶɷɸɹɺɻɼɽɾɿʀÊʂʃʄʅʆʇʈʉʊʋʌÊÊŽÊÊʑʒʓʔʕʖʗʘʙʚʛʜÊʞʟʠʡʢʣʤʥʦʧʨʩʪʫʬʭʮʯΆΈ +ΉΊΌΎÎÎΑΒΓΔΕΖΗΘΙΚΛΜÎΞΟΠΡΣΤΥΦΧΨΩΪΫάέήίΰαβγδεζηθικλμνξοπÏÏ‚ÏƒÏ„Ï…Ï†Ï‡ÏˆÏ‰ÏŠÏ‹ÏŒÏ +ÏŽÏϑϒϓϔϕϖϗϘϙϚϛϜÏϞϟϠϡϢϣϤϥϦϧϨϩϪϫϬϭϮϯϰϱϲϳϴϵϷϸϹϺϻϼϽϾϿЀÐЂЃЄЅІЇЈЉЊЋЌÐÐŽÐÐБ +ВГДЕЖЗИЙКЛМÐОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрÑтуфхцчшщъыьÑÑŽÑÑÑ‘Ñ’Ñ“ +єѕіїјљњћќÑўџѠѡѢѣѤѥѦѧѨѩѪѫѬѭѮѯѰѱѲѳѴѵѶѷѸѹѺѻѼѽѾѿҀÒÒŠÒ‹ÒŒÒÒŽÒÒÒ‘Ò’Ò“Ò”Ò•Ò–Ò—Ò˜Ò™ÒšÒ›ÒœÒ +ÒžÒŸÒ Ò¡Ò¢Ò£Ò¤Ò¥Ò¦Ò§Ò¨Ò©ÒªÒ«Ò¬Ò­Ò®Ò¯Ò°Ò±Ò²Ò³Ò´ÒµÒ¶Ò·Ò¸Ò¹ÒºÒ»Ò¼Ò½Ò¾Ò¿Ó€ÓÓ‚ÓƒÓ„Ó…Ó†Ó‡ÓˆÓ‰ÓŠÓ‹ÓŒÓÓŽÓÓ‘Ó’Ó“Ó”Ó•Ó–Ó—Ó˜Ó™ÓšÓ›ÓœÓÓžÓŸÓ  +Ó¡Ó¢Ó£Ó¤Ó¥Ó¦Ó§Ó¨Ó©ÓªÓ«Ó¬Ó­Ó®Ó¯Ó°Ó±Ó²Ó³Ó´ÓµÓ¶Ó·Ó¸Ó¹Ô€ÔÔ‚ÔƒÔ„Ô…Ô†Ô‡ÔˆÔ‰ÔŠÔ‹ÔŒÔÔŽÔÔ±Ô²Ô³Ô´ÔµÔ¶Ô·Ô¸Ô¹ÔºÔ»Ô¼Ô½Ô¾Ô¿Õ€ÕÕ‚ÕƒÕ„Õ…Õ†Õ‡ÕˆÕ‰ +ÕŠÕ‹ÕŒÕÕŽÕÕÕ‘Õ’Õ“Õ”Õ•Õ–Õ¡Õ¢Õ£Õ¤Õ¥Õ¦Õ§Õ¨Õ©ÕªÕ«Õ¬Õ­Õ®Õ¯Õ°Õ±Õ²Õ³Õ´ÕµÕ¶Õ·Õ¸Õ¹ÕºÕ»Õ¼Õ½Õ¾Õ¿Ö€ÖւփքօֆևႠႡႢႣႤႥႦႧႨႩႪႫႬႭ +ႮႯႰႱႲႳႴႵႶႷႸႹႺႻႼႽႾႿჀáƒáƒ‚ჃჄჅᴀá´á´‚ᴃᴄᴅᴆᴇᴈᴉᴊᴋᴌá´á´Žá´á´á´‘ᴒᴓᴔᴕᴖᴗᴘᴙᴚᴛᴜá´á´žá´Ÿá´ á´¡á´¢á´£á´¤á´¥á´¦á´§á´¨á´© +ᴪᴫᵢᵣᵤᵥᵦᵧᵨᵩᵪᵫᵬᵭᵮᵯᵰᵱᵲᵳᵴᵵᵶᵷᵹᵺᵻᵼᵽᵾᵿᶀá¶á¶‚ᶃᶄᶅᶆᶇᶈᶉᶊᶋᶌá¶á¶Žá¶á¶á¶‘ᶒᶓᶔᶕᶖᶗᶘᶙᶚḀá¸á¸‚ḃḄḅḆḇ +ḈḉḊḋḌá¸á¸Žá¸á¸á¸‘ḒḓḔḕḖḗḘḙḚḛḜá¸á¸žá¸Ÿá¸ á¸¡á¸¢á¸£á¸¤á¸¥á¸¦á¸§á¸¨á¸©á¸ªá¸«á¸¬á¸­á¸®á¸¯á¸°á¸±á¸²á¸³á¸´á¸µá¸¶á¸·á¸¸á¸¹á¸ºá¸»á¸¼á¸½á¸¾á¸¿á¹€á¹á¹‚ṃṄṅṆṇṈṉ +ṊṋṌá¹á¹Žá¹á¹á¹‘ṒṓṔṕṖṗṘṙṚṛṜá¹á¹žá¹Ÿá¹ á¹¡á¹¢á¹£á¹¤á¹¥á¹¦á¹§á¹¨á¹©á¹ªá¹«á¹¬á¹­á¹®á¹¯á¹°á¹±á¹²á¹³á¹´á¹µá¹¶á¹·á¹¸á¹¹á¹ºá¹»á¹¼á¹½á¹¾á¹¿áº€áºáº‚ẃẄẅẆẇẈẉẊẋ +ẌáºáºŽáºáºáº‘ẒẓẔẕẖẗẘẙẚẛẠạẢảẤấẦầẨẩẪẫẬậẮắẰằẲẳẴẵẶặẸẹẺẻẼẽẾếỀá»á»‚ểỄễỆệỈỉỊịỌá»á»Žá»á»á»‘ +ỒồỔổỖỗỘộỚớỜá»á»žá»Ÿá» 
á»¡á»¢á»£á»¤á»¥á»¦á»§á»¨á»©á»ªá»«á»¬á»­á»®á»¯á»°á»±á»²á»³á»´á»µá»¶á»·á»¸á»¹á¼€á¼á¼‚ἃἄἅἆἇἈἉἊἋἌá¼á¼Žá¼á¼á¼‘ἒἓἔἕἘἙἚἛ +Ἔá¼á¼ á¼¡á¼¢á¼£á¼¤á¼¥á¼¦á¼§á¼¨á¼©á¼ªá¼«á¼¬á¼­á¼®á¼¯á¼°á¼±á¼²á¼³á¼´á¼µá¼¶á¼·á¼¸á¼¹á¼ºá¼»á¼¼á¼½á¼¾á¼¿á½€á½á½‚ὃὄὅὈὉὊὋὌá½á½á½‘ὒὓὔὕὖὗὙὛá½á½Ÿá½ á½¡á½¢á½£á½¤á½¥á½¦á½§ +ὨὩὪὫὬὭὮὯὰάὲέὴήὶίὸόὺύὼώᾀá¾á¾‚ᾃᾄᾅᾆᾇá¾á¾‘ᾒᾓᾔᾕᾖᾗᾠᾡᾢᾣᾤᾥᾦᾧᾰᾱᾲᾳᾴᾶᾷᾸᾹᾺΆιῂῃῄῆῇῈΈῊ +á¿‹á¿á¿‘ῒΐῖῗῘῙῚΊῠῡῢΰῤῥῦῧῨῩῪΎῬῲῳῴῶῷῸΌῺΏâ±â¿â„‚ℇℊℋℌâ„â„Žâ„â„ℑℒℓℕℙℚℛℜâ„ℤΩℨKÅℬℭℯℰℱℳℴℹ +ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµºÀÃÂÃÄÅÆÇÈÉÊ +ËÌÃÃŽÃÃÑÒÓÔÕÖØÙÚÛÜÃÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿĀÄĂ㥹ĆćĈĉĊċČÄÄŽ +ÄÄđĒēĔĕĖėĘęĚěĜÄĞğĠġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀÅłŃńŅņŇňʼnŊŋŌÅÅŽÅÅ +őŒœŔŕŖŗŘřŚśŜÅŞşŠšŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſƀÆƂƃƄƅƆƇƈƉƊƋƌÆÆŽÆÆÆ‘Æ’ +ƓƔƕƖƗƘƙƚƛƜÆƞƟƠơƢƣƤƥƦƧƨƩƪƫƬƭƮƯưƱƲƳƴƵƶƷƸƹƺƼƽƾƿDŽdžLJljNJnjÇÇŽÇÇǑǒǓǔǕǖǗǘǙǚǛǜ +ÇǞǟǠǡǢǣǤǥǦǧǨǩǪǫǬǭǮǯǰDZdzǴǵǶǷǸǹǺǻǼǽǾǿȀÈȂȃȄȅȆȇȈȉȊȋȌÈÈŽÈÈȑȒȓȔȕȖȗȘșȚțȜÈȞȟ +ȠȡȢȣȤȥȦȧȨȩȪȫȬȭȮȯȰȱȲȳȴȵȶȷȸȹȺȻȼȽȾȿɀÉÉɑɒɓɔɕɖɗɘəɚɛɜÉɞɟɠɡɢɣɤɥɦɧɨɩɪɫɬɭɮɯ +ɰɱɲɳɴɵɶɷɸɹɺɻɼɽɾɿʀÊʂʃʄʅʆʇʈʉʊʋʌÊÊŽÊÊʑʒʓʔʕʖʗʘʙʚʛʜÊʞʟʠʡʢʣʤʥʦʧʨʩʪʫʬʭʮʯΆΈ +ΉΊΌΎÎÎΑΒΓΔΕΖΗΘΙΚΛΜÎΞΟΠΡΣΤΥΦΧΨΩΪΫάέήίΰαβγδεζηθικλμνξοπÏÏ‚ÏƒÏ„Ï…Ï†Ï‡ÏˆÏ‰ÏŠÏ‹ÏŒÏ +ÏŽÏϑϒϓϔϕϖϗϘϙϚϛϜÏϞϟϠϡϢϣϤϥϦϧϨϩϪϫϬϭϮϯϰϱϲϳϴϵϷϸϹϺϻϼϽϾϿЀÐЂЃЄЅІЇЈЉЊЋЌÐÐŽÐÐБ +ВГДЕЖЗИЙКЛМÐОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрÑтуфхцчшщъыьÑÑŽÑÑÑ‘Ñ’Ñ“ +єѕіїјљњћќÑўџѠѡѢѣѤѥѦѧѨѩѪѫѬѭѮѯѰѱѲѳѴѵѶѷѸѹѺѻѼѽѾѿҀÒÒŠÒ‹ÒŒÒÒŽÒÒÒ‘Ò’Ò“Ò”Ò•Ò–Ò—Ò˜Ò™ÒšÒ›ÒœÒ +ÒžÒŸÒ Ò¡Ò¢Ò£Ò¤Ò¥Ò¦Ò§Ò¨Ò©ÒªÒ«Ò¬Ò­Ò®Ò¯Ò°Ò±Ò²Ò³Ò´ÒµÒ¶Ò·Ò¸Ò¹ÒºÒ»Ò¼Ò½Ò¾Ò¿Ó€ÓÓ‚ÓƒÓ„Ó…Ó†Ó‡ÓˆÓ‰ÓŠÓ‹ÓŒÓÓŽÓÓ‘Ó’Ó“Ó”Ó•Ó–Ó—Ó˜Ó™ÓšÓ›ÓœÓÓžÓŸÓ  +Ó¡Ó¢Ó£Ó¤Ó¥Ó¦Ó§Ó¨Ó©ÓªÓ«Ó¬Ó­Ó®Ó¯Ó°Ó±Ó²Ó³Ó´ÓµÓ¶Ó·Ó¸Ó¹Ô€ÔÔ‚ÔƒÔ„Ô…Ô†Ô‡ÔˆÔ‰ÔŠÔ‹ÔŒÔÔŽÔÔ±Ô²Ô³Ô´ÔµÔ¶Ô·Ô¸Ô¹ÔºÔ»Ô¼Ô½Ô¾Ô¿Õ€ÕÕ‚ÕƒÕ„Õ…Õ†Õ‡ÕˆÕ‰ +ÕŠÕ‹ÕŒÕÕŽÕÕÕ‘Õ’Õ“Õ”Õ•Õ–Õ¡Õ¢Õ£Õ¤Õ¥Õ¦Õ§Õ¨Õ©ÕªÕ«Õ¬Õ­Õ®Õ¯Õ°Õ±Õ²Õ³Õ´ÕµÕ¶Õ·Õ¸Õ¹ÕºÕ»Õ¼Õ½Õ¾Õ¿Ö€ÖւփքօֆևႠႡႢႣႤႥႦႧႨႩႪႫႬႭ +ႮႯႰႱႲႳႴႵႶႷႸႹႺႻႼႽႾႿჀáƒáƒ‚ჃჄჅᴀá´á´‚ᴃᴄᴅᴆᴇᴈᴉᴊᴋᴌá´á´Žá´á´á´‘ᴒᴓᴔᴕᴖᴗᴘᴙᴚᴛᴜá´á´žá´Ÿá´ á´¡á´¢á´£á´¤á´¥á´¦á´§á´¨á´© +ᴪᴫᵢᵣᵤᵥᵦᵧᵨᵩᵪᵫᵬᵭᵮᵯᵰᵱᵲᵳᵴᵵᵶᵷᵹᵺᵻᵼᵽᵾᵿᶀá¶á¶‚ᶃᶄᶅᶆᶇᶈᶉᶊᶋᶌá¶á¶Žá¶á¶á¶‘ᶒᶓᶔᶕᶖᶗᶘᶙᶚḀá¸á¸‚ḃḄḅḆḇ +ḈḉḊḋḌá¸á¸Žá¸á¸á¸‘ḒḓḔḕḖḗḘḙḚḛḜá¸á¸žá¸Ÿá¸ 
á¸¡á¸¢á¸£á¸¤á¸¥á¸¦á¸§á¸¨á¸©á¸ªá¸«á¸¬á¸­á¸®á¸¯á¸°á¸±á¸²á¸³á¸´á¸µá¸¶á¸·á¸¸á¸¹á¸ºá¸»á¸¼á¸½á¸¾á¸¿á¹€á¹á¹‚ṃṄṅṆṇṈṉ +ṊṋṌá¹á¹Žá¹á¹á¹‘ṒṓṔṕṖṗṘṙṚṛṜá¹á¹žá¹Ÿá¹ á¹¡á¹¢á¹£á¹¤á¹¥á¹¦á¹§á¹¨á¹©á¹ªá¹«á¹¬á¹­á¹®á¹¯á¹°á¹±á¹²á¹³á¹´á¹µá¹¶á¹·á¹¸á¹¹á¹ºá¹»á¹¼á¹½á¹¾á¹¿áº€áºáº‚ẃẄẅẆẇẈẉẊẋ +ẌáºáºŽáºáºáº‘ẒẓẔẕẖẗẘẙẚẛẠạẢảẤấẦầẨẩẪẫẬậẮắẰằẲẳẴẵẶặẸẹẺẻẼẽẾếỀá»á»‚ểỄễỆệỈỉỊịỌá»á»Žá»á»á»‘ +ỒồỔổỖỗỘộỚớỜá»á»žá»Ÿá» á»¡á»¢á»£á»¤á»¥á»¦á»§á»¨á»©á»ªá»«á»¬á»­á»®á»¯á»°á»±á»²á»³á»´á»µá»¶á»·á»¸á»¹á¼€á¼á¼‚ἃἄἅἆἇἈἉἊἋἌá¼á¼Žá¼á¼á¼‘ἒἓἔἕἘἙἚἛ +Ἔá¼á¼ á¼¡á¼¢á¼£á¼¤á¼¥á¼¦á¼§á¼¨á¼©á¼ªá¼«á¼¬á¼­á¼®á¼¯á¼°á¼±á¼²á¼³á¼´á¼µá¼¶á¼·á¼¸á¼¹á¼ºá¼»á¼¼á¼½á¼¾á¼¿á½€á½á½‚ὃὄὅὈὉὊὋὌá½á½á½‘ὒὓὔὕὖὗὙὛá½á½Ÿá½ á½¡á½¢á½£á½¤á½¥á½¦á½§ +ὨὩὪὫὬὭὮὯὰάὲέὴήὶίὸόὺύὼώᾀá¾á¾‚ᾃᾄᾅᾆᾇá¾á¾‘ᾒᾓᾔᾕᾖᾗᾠᾡᾢᾣᾤᾥᾦᾧᾰᾱᾲᾳᾴᾶᾷᾸᾹᾺΆιῂῃῄῆῇῈΈῊ +á¿‹á¿á¿‘ῒΐῖῗῘῙῚΊῠῡῢΰῤῥῦῧῨῩῪΎῬῲῳῴῶῷῸΌῺΏâ±â¿â„‚ℇℊℋℌâ„â„Žâ„â„ℑℒℓℕℙℚℛℜâ„ℤΩℨKÅℬℭℯℰℱℳℴℹ +ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµºÀÃÂÃÄÅÆÇÈÉÊ +ËÌÃÃŽÃÃÑÒÓÔÕÖØÙÚÛÜÃÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿĀÄĂ㥹ĆćĈĉĊċČÄÄŽ +ÄÄđĒēĔĕĖėĘęĚěĜÄĞğĠġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀÅłŃńŅņŇňʼnŊŋŌÅÅŽÅÅ +őŒœŔŕŖŗŘřŚśŜÅŞşŠšŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſƀÆƂƃƄƅƆƇƈƉƊƋƌÆÆŽÆÆÆ‘Æ’ +ƓƔƕƖƗƘƙƚƛƜÆƞƟƠơƢƣƤƥƦƧƨƩƪƫƬƭƮƯưƱƲƳƴƵƶƷƸƹƺƼƽƾƿDŽdžLJljNJnjÇÇŽÇÇǑǒǓǔǕǖǗǘǙǚǛǜ +ÇǞǟǠǡǢǣǤǥǦǧǨǩǪǫǬǭǮǯǰDZdzǴǵǶǷǸǹǺǻǼǽǾǿȀÈȂȃȄȅȆȇȈȉȊȋȌÈÈŽÈÈȑȒȓȔȕȖȗȘșȚțȜÈȞȟ +ȠȡȢȣȤȥȦȧȨȩȪȫȬȭȮȯȰȱȲȳȴȵȶȷȸȹȺȻȼȽȾȿɀÉÉɑɒɓɔɕɖɗɘəɚɛɜÉɞɟɠɡɢɣɤɥɦɧɨɩɪɫɬɭɮɯ +ɰɱɲɳɴɵɶɷɸɹɺɻɼɽɾɿʀÊʂʃʄʅʆʇʈʉʊʋʌÊÊŽÊÊʑʒʓʔʕʖʗʘʙʚʛʜÊʞʟʠʡʢʣʤʥʦʧʨʩʪʫʬʭʮʯΆΈ +ΉΊΌΎÎÎΑΒΓΔΕΖΗΘΙΚΛΜÎΞΟΠΡΣΤΥΦΧΨΩΪΫάέήίΰαβγδεζηθικλμνξοπÏÏ‚ÏƒÏ„Ï…Ï†Ï‡ÏˆÏ‰ÏŠÏ‹ÏŒÏ +ÏŽÏϑϒϓϔϕϖϗϘϙϚϛϜÏϞϟϠϡϢϣϤϥϦϧϨϩϪϫϬϭϮϯϰϱϲϳϴϵϷϸϹϺϻϼϽϾϿЀÐЂЃЄЅІЇЈЉЊЋЌÐÐŽÐÐБ +ВГДЕЖЗИЙКЛМÐОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрÑтуфхцчшщъыьÑÑŽÑÑÑ‘Ñ’Ñ“ +єѕіїјљњћќÑўџѠѡѢѣѤѥѦѧѨѩѪѫѬѭѮѯѰѱѲѳѴѵѶѷѸѹѺѻѼѽѾѿҀÒÒŠÒ‹ÒŒÒÒŽÒÒÒ‘Ò’Ò“Ò”Ò•Ò–Ò—Ò˜Ò™ÒšÒ›ÒœÒ +ÒžÒŸÒ Ò¡Ò¢Ò£Ò¤Ò¥Ò¦Ò§Ò¨Ò©ÒªÒ«Ò¬Ò­Ò®Ò¯Ò°Ò±Ò²Ò³Ò´ÒµÒ¶Ò·Ò¸Ò¹ÒºÒ»Ò¼Ò½Ò¾Ò¿Ó€ÓÓ‚ÓƒÓ„Ó…Ó†Ó‡ÓˆÓ‰ÓŠÓ‹ÓŒÓÓŽÓÓ‘Ó’Ó“Ó”Ó•Ó–Ó—Ó˜Ó™ÓšÓ›ÓœÓÓžÓŸÓ  +Ó¡Ó¢Ó£Ó¤Ó¥Ó¦Ó§Ó¨Ó©ÓªÓ«Ó¬Ó­Ó®Ó¯Ó°Ó±Ó²Ó³Ó´ÓµÓ¶Ó·Ó¸Ó¹Ô€ÔÔ‚ÔƒÔ„Ô…Ô†Ô‡ÔˆÔ‰ÔŠÔ‹ÔŒÔÔŽÔÔ±Ô²Ô³Ô´ÔµÔ¶Ô·Ô¸Ô¹ÔºÔ»Ô¼Ô½Ô¾Ô¿Õ€ÕÕ‚ÕƒÕ„Õ…Õ†Õ‡ÕˆÕ‰ 
+ÕŠÕ‹ÕŒÕÕŽÕÕÕ‘Õ’Õ“Õ”Õ•Õ–Õ¡Õ¢Õ£Õ¤Õ¥Õ¦Õ§Õ¨Õ©ÕªÕ«Õ¬Õ­Õ®Õ¯Õ°Õ±Õ²Õ³Õ´ÕµÕ¶Õ·Õ¸Õ¹ÕºÕ»Õ¼Õ½Õ¾Õ¿Ö€ÖւփքօֆևႠႡႢႣႤႥႦႧႨႩႪႫႬႭ +ႮႯႰႱႲႳႴႵႶႷႸႹႺႻႼႽႾႿჀáƒáƒ‚ჃჄჅᴀá´á´‚ᴃᴄᴅᴆᴇᴈᴉᴊᴋᴌá´á´Žá´á´á´‘ᴒᴓᴔᴕᴖᴗᴘᴙᴚᴛᴜá´á´žá´Ÿá´ á´¡á´¢á´£á´¤á´¥á´¦á´§á´¨á´© +ᴪᴫᵢᵣᵤᵥᵦᵧᵨᵩᵪᵫᵬᵭᵮᵯᵰᵱᵲᵳᵴᵵᵶᵷᵹᵺᵻᵼᵽᵾᵿᶀá¶á¶‚ᶃᶄᶅᶆᶇᶈᶉᶊᶋᶌá¶á¶Žá¶á¶á¶‘ᶒᶓᶔᶕᶖᶗᶘᶙᶚḀá¸á¸‚ḃḄḅḆḇ +ḈḉḊḋḌá¸á¸Žá¸á¸á¸‘ḒḓḔḕḖḗḘḙḚḛḜá¸á¸žá¸Ÿá¸ á¸¡á¸¢á¸£á¸¤á¸¥á¸¦á¸§á¸¨á¸©á¸ªá¸«á¸¬á¸­á¸®á¸¯á¸°á¸±á¸²á¸³á¸´á¸µá¸¶á¸·á¸¸á¸¹á¸ºá¸»á¸¼á¸½á¸¾á¸¿á¹€á¹á¹‚ṃṄṅṆṇṈṉ +ṊṋṌá¹á¹Žá¹á¹á¹‘ṒṓṔṕṖṗṘṙṚṛṜá¹á¹žá¹Ÿá¹ á¹¡á¹¢á¹£á¹¤á¹¥á¹¦á¹§á¹¨á¹©á¹ªá¹«á¹¬á¹­á¹®á¹¯á¹°á¹±á¹²á¹³á¹´á¹µá¹¶á¹·á¹¸á¹¹á¹ºá¹»á¹¼á¹½á¹¾á¹¿áº€áºáº‚ẃẄẅẆẇẈẉẊẋ +ẌáºáºŽáºáºáº‘ẒẓẔẕẖẗẘẙẚẛẠạẢảẤấẦầẨẩẪẫẬậẮắẰằẲẳẴẵẶặẸẹẺẻẼẽẾếỀá»á»‚ểỄễỆệỈỉỊịỌá»á»Žá»á»á»‘ +ỒồỔổỖỗỘộỚớỜá»á»žá»Ÿá» á»¡á»¢á»£á»¤á»¥á»¦á»§á»¨á»©á»ªá»«á»¬á»­á»®á»¯á»°á»±á»²á»³á»´á»µá»¶á»·á»¸á»¹á¼€á¼á¼‚ἃἄἅἆἇἈἉἊἋἌá¼á¼Žá¼á¼á¼‘ἒἓἔἕἘἙἚἛ +Ἔá¼á¼ á¼¡á¼¢á¼£á¼¤á¼¥á¼¦á¼§á¼¨á¼©á¼ªá¼«á¼¬á¼­á¼®á¼¯á¼°á¼±á¼²á¼³á¼´á¼µá¼¶á¼·á¼¸á¼¹á¼ºá¼»á¼¼á¼½á¼¾á¼¿á½€á½á½‚ὃὄὅὈὉὊὋὌá½á½á½‘ὒὓὔὕὖὗὙὛá½á½Ÿá½ á½¡á½¢á½£á½¤á½¥á½¦á½§ +ὨὩὪὫὬὭὮὯὰάὲέὴήὶίὸόὺύὼώᾀá¾á¾‚ᾃᾄᾅᾆᾇá¾á¾‘ᾒᾓᾔᾕᾖᾗᾠᾡᾢᾣᾤᾥᾦᾧᾰᾱᾲᾳᾴᾶᾷᾸᾹᾺΆιῂῃῄῆῇῈΈῊ +á¿‹á¿á¿‘ῒΐῖῗῘῙῚΊῠῡῢΰῤῥῦῧῨῩῪΎῬῲῳῴῶῷῸΌῺΏâ±â¿â„‚ℇℊℋℌâ„â„Žâ„â„ℑℒℓℕℙℚℛℜâ„ℤΩℨKÅℬℭℯℰℱℳℴℹ +ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµºÀÃÂÃÄÅÆÇÈÉÊ +ËÌÃÃŽÃÃÑÒÓÔÕÖØÙÚÛÜÃÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿĀÄĂ㥹ĆćĈĉĊċČÄÄŽ +ÄÄđĒēĔĕĖėĘęĚěĜÄĞğĠġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀÅłŃńŅņŇňʼnŊŋŌÅÅŽÅÅ +őŒœŔŕŖŗŘřŚśŜÅŞşŠšŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſƀÆƂƃƄƅƆƇƈƉƊƋƌÆÆŽÆÆÆ‘Æ’ +ƓƔƕƖƗƘƙƚƛƜÆƞƟƠơƢƣƤƥƦƧƨƩƪƫƬƭƮƯưƱƲƳƴƵƶƷƸƹƺƼƽƾƿDŽdžLJljNJnjÇÇŽÇÇǑǒǓǔǕǖǗǘǙǚǛǜ +ÇǞǟǠǡǢǣǤǥǦǧǨǩǪǫǬǭǮǯǰDZdzǴǵǶǷǸǹǺǻǼǽǾǿȀÈȂȃȄȅȆȇȈȉȊȋȌÈÈŽÈÈȑȒȓȔȕȖȗȘșȚțȜÈȞȟ +ȠȡȢȣȤȥȦȧȨȩȪȫȬȭȮȯȰȱȲȳȴȵȶȷȸȹȺȻȼȽȾȿɀÉÉɑɒɓɔɕɖɗɘəɚɛɜÉɞɟɠɡɢɣɤɥɦɧɨɩɪɫɬɭɮɯ +ɰɱɲɳɴɵɶɷɸɹɺɻɼɽɾɿʀÊʂʃʄʅʆʇʈʉʊʋʌÊÊŽÊÊʑʒʓʔʕʖʗʘʙʚʛʜÊʞʟʠʡʢʣʤʥʦʧʨʩʪʫʬʭʮʯΆΈ +ΉΊΌΎÎÎΑΒΓΔΕΖΗΘΙΚΛΜÎΞΟΠΡΣΤΥΦΧΨΩΪΫάέήίΰαβγδεζηθικλμνξοπÏÏ‚ÏƒÏ„Ï…Ï†Ï‡ÏˆÏ‰ÏŠÏ‹ÏŒÏ +ÏŽÏϑϒϓϔϕϖϗϘϙϚϛϜÏϞϟϠϡϢϣϤϥϦϧϨϩϪϫϬϭϮϯϰϱϲϳϴϵϷϸϹϺϻϼϽϾϿЀÐЂЃЄЅІЇЈЉЊЋЌÐÐŽÐÐБ +ВГДЕЖЗИЙКЛМÐОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрÑтуфхцчшщъыьÑÑŽÑÑÑ‘Ñ’Ñ“ 
+єѕіїјљњћќÑўџѠѡѢѣѤѥѦѧѨѩѪѫѬѭѮѯѰѱѲѳѴѵѶѷѸѹѺѻѼѽѾѿҀÒÒŠÒ‹ÒŒÒÒŽÒÒÒ‘Ò’Ò“Ò”Ò•Ò–Ò—Ò˜Ò™ÒšÒ›ÒœÒ +ÒžÒŸÒ Ò¡Ò¢Ò£Ò¤Ò¥Ò¦Ò§Ò¨Ò©ÒªÒ«Ò¬Ò­Ò®Ò¯Ò°Ò±Ò²Ò³Ò´ÒµÒ¶Ò·Ò¸Ò¹ÒºÒ»Ò¼Ò½Ò¾Ò¿Ó€ÓÓ‚ÓƒÓ„Ó…Ó†Ó‡ÓˆÓ‰ÓŠÓ‹ÓŒÓÓŽÓÓ‘Ó’Ó“Ó”Ó•Ó–Ó—Ó˜Ó™ÓšÓ›ÓœÓÓžÓŸÓ  +Ó¡Ó¢Ó£Ó¤Ó¥Ó¦Ó§Ó¨Ó©ÓªÓ«Ó¬Ó­Ó®Ó¯Ó°Ó±Ó²Ó³Ó´ÓµÓ¶Ó·Ó¸Ó¹Ô€ÔÔ‚ÔƒÔ„Ô…Ô†Ô‡ÔˆÔ‰ÔŠÔ‹ÔŒÔÔŽÔÔ±Ô²Ô³Ô´ÔµÔ¶Ô·Ô¸Ô¹ÔºÔ»Ô¼Ô½Ô¾Ô¿Õ€ÕÕ‚ÕƒÕ„Õ…Õ†Õ‡ÕˆÕ‰ +ÕŠÕ‹ÕŒÕÕŽÕÕÕ‘Õ’Õ“Õ”Õ•Õ–Õ¡Õ¢Õ£Õ¤Õ¥Õ¦Õ§Õ¨Õ©ÕªÕ«Õ¬Õ­Õ®Õ¯Õ°Õ±Õ²Õ³Õ´ÕµÕ¶Õ·Õ¸Õ¹ÕºÕ»Õ¼Õ½Õ¾Õ¿Ö€ÖւփքօֆևႠႡႢႣႤႥႦႧႨႩႪႫႬႭ +ႮႯႰႱႲႳႴႵႶႷႸႹႺႻႼႽႾႿჀáƒáƒ‚ჃჄჅᴀá´á´‚ᴃᴄᴅᴆᴇᴈᴉᴊᴋᴌá´á´Žá´á´á´‘ᴒᴓᴔᴕᴖᴗᴘᴙᴚᴛᴜá´á´žá´Ÿá´ á´¡á´¢á´£á´¤á´¥á´¦á´§á´¨á´© +ᴪᴫᵢᵣᵤᵥᵦᵧᵨᵩᵪᵫᵬᵭᵮᵯᵰᵱᵲᵳᵴᵵᵶᵷᵹᵺᵻᵼᵽᵾᵿᶀá¶á¶‚ᶃᶄᶅᶆᶇᶈᶉᶊᶋᶌá¶á¶Žá¶á¶á¶‘ᶒᶓᶔᶕᶖᶗᶘᶙᶚḀá¸á¸‚ḃḄḅḆḇ +ḈḉḊḋḌá¸á¸Žá¸á¸á¸‘ḒḓḔḕḖḗḘḙḚḛḜá¸á¸žá¸Ÿá¸ á¸¡á¸¢á¸£á¸¤á¸¥á¸¦á¸§á¸¨á¸©á¸ªá¸«á¸¬á¸­á¸®á¸¯á¸°á¸±á¸²á¸³á¸´á¸µá¸¶á¸·á¸¸á¸¹á¸ºá¸»á¸¼á¸½á¸¾á¸¿á¹€á¹á¹‚ṃṄṅṆṇṈṉ +ṊṋṌá¹á¹Žá¹á¹á¹‘ṒṓṔṕṖṗṘṙṚṛṜá¹á¹žá¹Ÿá¹ á¹¡á¹¢á¹£á¹¤á¹¥á¹¦á¹§á¹¨á¹©á¹ªá¹«á¹¬á¹­á¹®á¹¯á¹°á¹±á¹²á¹³á¹´á¹µá¹¶á¹·á¹¸á¹¹á¹ºá¹»á¹¼á¹½á¹¾á¹¿áº€áºáº‚ẃẄẅẆẇẈẉẊẋ +ẌáºáºŽáºáºáº‘ẒẓẔẕẖẗẘẙẚẛẠạẢảẤấẦầẨẩẪẫẬậẮắẰằẲẳẴẵẶặẸẹẺẻẼẽẾếỀá»á»‚ểỄễỆệỈỉỊịỌá»á»Žá»á»á»‘ +ỒồỔổỖỗỘộỚớỜá»á»žá»Ÿá» á»¡á»¢á»£á»¤á»¥á»¦á»§á»¨á»©á»ªá»«á»¬á»­á»®á»¯á»°á»±á»²á»³á»´á»µá»¶á»·á»¸á»¹á¼€á¼á¼‚ἃἄἅἆἇἈἉἊἋἌá¼á¼Žá¼á¼á¼‘ἒἓἔἕἘἙἚἛ +Ἔá¼á¼ á¼¡á¼¢á¼£á¼¤á¼¥á¼¦á¼§á¼¨á¼©á¼ªá¼«á¼¬á¼­á¼®á¼¯á¼°á¼±á¼²á¼³á¼´á¼µá¼¶á¼·á¼¸á¼¹á¼ºá¼»á¼¼á¼½á¼¾á¼¿á½€á½á½‚ὃὄὅὈὉὊὋὌá½á½á½‘ὒὓὔὕὖὗὙὛá½á½Ÿá½ á½¡á½¢á½£á½¤á½¥á½¦á½§ +ὨὩὪὫὬὭὮὯὰάὲέὴήὶίὸόὺύὼώᾀá¾á¾‚ᾃᾄᾅᾆᾇá¾á¾‘ᾒᾓᾔᾕᾖᾗᾠᾡᾢᾣᾤᾥᾦᾧᾰᾱᾲᾳᾴᾶᾷᾸᾹᾺΆιῂῃῄῆῇῈΈῊ +á¿‹á¿á¿‘ῒΐῖῗῘῙῚΊῠῡῢΰῤῥῦῧῨῩῪΎῬῲῳῴῶῷῸΌῺΏâ±â¿â„‚ℇℊℋℌâ„â„Žâ„â„ℑℒℓℕℙℚℛℜâ„ℤΩℨKÅℬℭℯℰℱℳℴℹ +ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµºÀÃÂÃÄÅÆÇÈÉÊ +ËÌÃÃŽÃÃÑÒÓÔÕÖØÙÚÛÜÃÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿĀÄĂ㥹ĆćĈĉĊċČÄÄŽ +ÄÄđĒēĔĕĖėĘęĚěĜÄĞğĠġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀÅłŃńŅņŇňʼnŊŋŌÅÅŽÅÅ +őŒœŔŕŖŗŘřŚśŜÅŞşŠšŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſƀÆƂƃƄƅƆƇƈƉƊƋƌÆÆŽÆÆÆ‘Æ’ +ƓƔƕƖƗƘƙƚƛƜÆƞƟƠơƢƣƤƥƦƧƨƩƪƫƬƭƮƯưƱƲƳƴƵƶƷƸƹƺƼƽƾƿDŽdžLJljNJnjÇÇŽÇÇǑǒǓǔǕǖǗǘǙǚǛǜ +ÇǞǟǠǡǢǣǤǥǦǧǨǩǪǫǬǭǮǯǰDZdzǴǵǶǷǸǹǺǻǼǽǾǿȀÈȂȃȄȅȆȇȈȉȊȋȌÈÈŽÈÈȑȒȓȔȕȖȗȘșȚțȜÈȞȟ +ȠȡȢȣȤȥȦȧȨȩȪȫȬȭȮȯȰȱȲȳȴȵȶȷȸȹȺȻȼȽȾȿɀÉÉɑɒɓɔɕɖɗɘəɚɛɜÉɞɟɠɡɢɣɤɥɦɧɨɩɪɫɬɭɮɯ 
+ɰɱɲɳɴɵɶɷɸɹɺɻɼɽɾɿʀÊʂʃʄʅʆʇʈʉʊʋʌÊÊŽÊÊʑʒʓʔʕʖʗʘʙʚʛʜÊʞʟʠʡʢʣʤʥʦʧʨʩʪʫʬʭʮʯΆΈ +ΉΊΌΎÎÎΑΒΓΔΕΖΗΘΙΚΛΜÎΞΟΠΡΣΤΥΦΧΨΩΪΫάέήίΰαβγδεζηθικλμνξοπÏÏ‚ÏƒÏ„Ï…Ï†Ï‡ÏˆÏ‰ÏŠÏ‹ÏŒÏ +ÏŽÏϑϒϓϔϕϖϗϘϙϚϛϜÏϞϟϠϡϢϣϤϥϦϧϨϩϪϫϬϭϮϯϰϱϲϳϴϵϷϸϹϺϻϼϽϾϿЀÐЂЃЄЅІЇЈЉЊЋЌÐÐŽÐÐБ +ВГДЕЖЗИЙКЛМÐОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрÑтуфхцчшщъыьÑÑŽÑÑÑ‘Ñ’Ñ“ +єѕіїјљњћќÑўџѠѡѢѣѤѥѦѧѨѩѪѫѬѭѮѯѰѱѲѳѴѵѶѷѸѹѺѻѼѽѾѿҀÒÒŠÒ‹ÒŒÒÒŽÒÒÒ‘Ò’Ò“Ò”Ò•Ò–Ò—Ò˜Ò™ÒšÒ›ÒœÒ +ÒžÒŸÒ Ò¡Ò¢Ò£Ò¤Ò¥Ò¦Ò§Ò¨Ò©ÒªÒ«Ò¬Ò­Ò®Ò¯Ò°Ò±Ò²Ò³Ò´ÒµÒ¶Ò·Ò¸Ò¹ÒºÒ»Ò¼Ò½Ò¾Ò¿Ó€ÓÓ‚ÓƒÓ„Ó…Ó†Ó‡ÓˆÓ‰ÓŠÓ‹ÓŒÓÓŽÓÓ‘Ó’Ó“Ó”Ó•Ó–Ó—Ó˜Ó™ÓšÓ›ÓœÓÓžÓŸÓ  +Ó¡Ó¢Ó£Ó¤Ó¥Ó¦Ó§Ó¨Ó©ÓªÓ«Ó¬Ó­Ó®Ó¯Ó°Ó±Ó²Ó³Ó´ÓµÓ¶Ó·Ó¸Ó¹Ô€ÔÔ‚ÔƒÔ„Ô…Ô†Ô‡ÔˆÔ‰ÔŠÔ‹ÔŒÔÔŽÔÔ±Ô²Ô³Ô´ÔµÔ¶Ô·Ô¸Ô¹ÔºÔ»Ô¼Ô½Ô¾Ô¿Õ€ÕÕ‚ÕƒÕ„Õ…Õ†Õ‡ÕˆÕ‰ +ÕŠÕ‹ÕŒÕÕŽÕÕÕ‘Õ’Õ“Õ”Õ•Õ–Õ¡Õ¢Õ£Õ¤Õ¥Õ¦Õ§Õ¨Õ©ÕªÕ«Õ¬Õ­Õ®Õ¯Õ°Õ±Õ²Õ³Õ´ÕµÕ¶Õ·Õ¸Õ¹ÕºÕ»Õ¼Õ½Õ¾Õ¿Ö€ÖւփքօֆևႠႡႢႣႤႥႦႧႨႩႪႫႬႭ +ႮႯႰႱႲႳႴႵႶႷႸႹႺႻႼႽႾႿჀáƒáƒ‚ჃჄჅᴀá´á´‚ᴃᴄᴅᴆᴇᴈᴉᴊᴋᴌá´á´Žá´á´á´‘ᴒᴓᴔᴕᴖᴗᴘᴙᴚᴛᴜá´á´žá´Ÿá´ á´¡á´¢á´£á´¤á´¥á´¦á´§á´¨á´© +ᴪᴫᵢᵣᵤᵥᵦᵧᵨᵩᵪᵫᵬᵭᵮᵯᵰᵱᵲᵳᵴᵵᵶᵷᵹᵺᵻᵼᵽᵾᵿᶀá¶á¶‚ᶃᶄᶅᶆᶇᶈᶉᶊᶋᶌá¶á¶Žá¶á¶á¶‘ᶒᶓᶔᶕᶖᶗᶘᶙᶚḀá¸á¸‚ḃḄḅḆḇ +ḈḉḊḋḌá¸á¸Žá¸á¸á¸‘ḒḓḔḕḖḗḘḙḚḛḜá¸á¸žá¸Ÿá¸ á¸¡á¸¢á¸£á¸¤á¸¥á¸¦á¸§á¸¨á¸©á¸ªá¸«á¸¬á¸­á¸®á¸¯á¸°á¸±á¸²á¸³á¸´á¸µá¸¶á¸·á¸¸á¸¹á¸ºá¸»á¸¼á¸½á¸¾á¸¿á¹€á¹á¹‚ṃṄṅṆṇṈṉ +ṊṋṌá¹á¹Žá¹á¹á¹‘ṒṓṔṕṖṗṘṙṚṛṜá¹á¹žá¹Ÿá¹ á¹¡á¹¢á¹£á¹¤á¹¥á¹¦á¹§á¹¨á¹©á¹ªá¹«á¹¬á¹­á¹®á¹¯á¹°á¹±á¹²á¹³á¹´á¹µá¹¶á¹·á¹¸á¹¹á¹ºá¹»á¹¼á¹½á¹¾á¹¿áº€áºáº‚ẃẄẅẆẇẈẉẊẋ +ẌáºáºŽáºáºáº‘ẒẓẔẕẖẗẘẙẚẛẠạẢảẤấẦầẨẩẪẫẬậẮắẰằẲẳẴẵẶặẸẹẺẻẼẽẾếỀá»á»‚ểỄễỆệỈỉỊịỌá»á»Žá»á»á»‘ +ỒồỔổỖỗỘộỚớỜá»á»žá»Ÿá» á»¡á»¢á»£á»¤á»¥á»¦á»§á»¨á»©á»ªá»«á»¬á»­á»®á»¯á»°á»±á»²á»³á»´á»µá»¶á»·á»¸á»¹á¼€á¼á¼‚ἃἄἅἆἇἈἉἊἋἌá¼á¼Žá¼á¼á¼‘ἒἓἔἕἘἙἚἛ +Ἔá¼á¼ á¼¡á¼¢á¼£á¼¤á¼¥á¼¦á¼§á¼¨á¼©á¼ªá¼«á¼¬á¼­á¼®á¼¯á¼°á¼±á¼²á¼³á¼´á¼µá¼¶á¼·á¼¸á¼¹á¼ºá¼»á¼¼á¼½á¼¾á¼¿á½€á½á½‚ὃὄὅὈὉὊὋὌá½á½á½‘ὒὓὔὕὖὗὙὛá½á½Ÿá½ á½¡á½¢á½£á½¤á½¥á½¦á½§ +ὨὩὪὫὬὭὮὯὰάὲέὴήὶίὸόὺύὼώᾀá¾á¾‚ᾃᾄᾅᾆᾇá¾á¾‘ᾒᾓᾔᾕᾖᾗᾠᾡᾢᾣᾤᾥᾦᾧᾰᾱᾲᾳᾴᾶᾷᾸᾹᾺΆιῂῃῄῆῇῈΈῊ +á¿‹á¿á¿‘ῒΐῖῗῘῙῚΊῠῡῢΰῤῥῦῧῨῩῪΎῬῲῳῴῶῷῸΌῺΏâ±â¿â„‚ℇℊℋℌâ„â„Žâ„â„ℑℒℓℕℙℚℛℜâ„ℤΩℨKÅℬℭℯℰℱℳℴℹ +ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµºÀÃÂÃÄÅÆÇÈÉÊ +ËÌÃÃŽÃÃÑÒÓÔÕÖØÙÚÛÜÃÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿĀÄĂ㥹ĆćĈĉĊċČÄÄŽ +ÄÄđĒēĔĕĖėĘęĚěĜÄĞğĠġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀÅłŃńŅņŇňʼnŊŋŌÅÅŽÅÅ 
+őŒœŔŕŖŗŘřŚśŜÅŞşŠšŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſƀÆƂƃƄƅƆƇƈƉƊƋƌÆÆŽÆÆÆ‘Æ’ +ƓƔƕƖƗƘƙƚƛƜÆƞƟƠơƢƣƤƥƦƧƨƩƪƫƬƭƮƯưƱƲƳƴƵƶƷƸƹƺƼƽƾƿDŽdžLJljNJnjÇÇŽÇÇǑǒǓǔǕǖǗǘǙǚǛǜ +ÇǞǟǠǡǢǣǤǥǦǧǨǩǪǫǬǭǮǯǰDZdzǴǵǶǷǸǹǺǻǼǽǾǿȀÈȂȃȄȅȆȇȈȉȊȋȌÈÈŽÈÈȑȒȓȔȕȖȗȘșȚțȜÈȞȟ +ȠȡȢȣȤȥȦȧȨȩȪȫȬȭȮȯȰȱȲȳȴȵȶȷȸȹȺȻȼȽȾȿɀÉÉɑɒɓɔɕɖɗɘəɚɛɜÉɞɟɠɡɢɣɤɥɦɧɨɩɪɫɬɭɮɯ +ɰɱɲɳɴɵɶɷɸɹɺɻɼɽɾɿʀÊʂʃʄʅʆʇʈʉʊʋʌÊÊŽÊÊʑʒʓʔʕʖʗʘʙʚʛʜÊʞʟʠʡʢʣʤʥʦʧʨʩʪʫʬʭʮʯΆΈ +ΉΊΌΎÎÎΑΒΓΔΕΖΗΘΙΚΛΜÎΞΟΠΡΣΤΥΦΧΨΩΪΫάέήίΰαβγδεζηθικλμνξοπÏÏ‚ÏƒÏ„Ï…Ï†Ï‡ÏˆÏ‰ÏŠÏ‹ÏŒÏ +ÏŽÏϑϒϓϔϕϖϗϘϙϚϛϜÏϞϟϠϡϢϣϤϥϦϧϨϩϪϫϬϭϮϯϰϱϲϳϴϵϷϸϹϺϻϼϽϾϿЀÐЂЃЄЅІЇЈЉЊЋЌÐÐŽÐÐБ +ВГДЕЖЗИЙКЛМÐОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрÑтуфхцчшщъыьÑÑŽÑÑÑ‘Ñ’Ñ“ +єѕіїјљњћќÑўџѠѡѢѣѤѥѦѧѨѩѪѫѬѭѮѯѰѱѲѳѴѵѶѷѸѹѺѻѼѽѾѿҀÒÒŠÒ‹ÒŒÒÒŽÒÒÒ‘Ò’Ò“Ò”Ò•Ò–Ò—Ò˜Ò™ÒšÒ›ÒœÒ +ÒžÒŸÒ Ò¡Ò¢Ò£Ò¤Ò¥Ò¦Ò§Ò¨Ò©ÒªÒ«Ò¬Ò­Ò®Ò¯Ò°Ò±Ò²Ò³Ò´ÒµÒ¶Ò·Ò¸Ò¹ÒºÒ»Ò¼Ò½Ò¾Ò¿Ó€ÓÓ‚ÓƒÓ„Ó…Ó†Ó‡ÓˆÓ‰ÓŠÓ‹ÓŒÓÓŽÓÓ‘Ó’Ó“Ó”Ó•Ó–Ó—Ó˜Ó™ÓšÓ›ÓœÓÓžÓŸÓ  +Ó¡Ó¢Ó£Ó¤Ó¥Ó¦Ó§Ó¨Ó©ÓªÓ«Ó¬Ó­Ó®Ó¯Ó°Ó±Ó²Ó³Ó´ÓµÓ¶Ó·Ó¸Ó¹Ô€ÔÔ‚ÔƒÔ„Ô…Ô†Ô‡ÔˆÔ‰ÔŠÔ‹ÔŒÔÔŽÔÔ±Ô²Ô³Ô´ÔµÔ¶Ô·Ô¸Ô¹ÔºÔ»Ô¼Ô½Ô¾Ô¿Õ€ÕÕ‚ÕƒÕ„Õ…Õ†Õ‡ÕˆÕ‰ +ÕŠÕ‹ÕŒÕÕŽÕÕÕ‘Õ’Õ“Õ”Õ•Õ–Õ¡Õ¢Õ£Õ¤Õ¥Õ¦Õ§Õ¨Õ©ÕªÕ«Õ¬Õ­Õ®Õ¯Õ°Õ±Õ²Õ³Õ´ÕµÕ¶Õ·Õ¸Õ¹ÕºÕ»Õ¼Õ½Õ¾Õ¿Ö€ÖւփքօֆևႠႡႢႣႤႥႦႧႨႩႪႫႬႭ +ႮႯႰႱႲႳႴႵႶႷႸႹႺႻႼႽႾႿჀáƒáƒ‚ჃჄჅᴀá´á´‚ᴃᴄᴅᴆᴇᴈᴉᴊᴋᴌá´á´Žá´á´á´‘ᴒᴓᴔᴕᴖᴗᴘᴙᴚᴛᴜá´á´žá´Ÿá´ á´¡á´¢á´£á´¤á´¥á´¦á´§á´¨á´© +ᴪᴫᵢᵣᵤᵥᵦᵧᵨᵩᵪᵫᵬᵭᵮᵯᵰᵱᵲᵳᵴᵵᵶᵷᵹᵺᵻᵼᵽᵾᵿᶀá¶á¶‚ᶃᶄᶅᶆᶇᶈᶉᶊᶋᶌá¶á¶Žá¶á¶á¶‘ᶒᶓᶔᶕᶖᶗᶘᶙᶚḀá¸á¸‚ḃḄḅḆḇ +ḈḉḊḋḌá¸á¸Žá¸á¸á¸‘ḒḓḔḕḖḗḘḙḚḛḜá¸á¸žá¸Ÿá¸ á¸¡á¸¢á¸£á¸¤á¸¥á¸¦á¸§á¸¨á¸©á¸ªá¸«á¸¬á¸­á¸®á¸¯á¸°á¸±á¸²á¸³á¸´á¸µá¸¶á¸·á¸¸á¸¹á¸ºá¸»á¸¼á¸½á¸¾á¸¿á¹€á¹á¹‚ṃṄṅṆṇṈṉ +ṊṋṌá¹á¹Žá¹á¹á¹‘ṒṓṔṕṖṗṘṙṚṛṜá¹á¹žá¹Ÿá¹ á¹¡á¹¢á¹£á¹¤á¹¥á¹¦á¹§á¹¨á¹©á¹ªá¹«á¹¬á¹­á¹®á¹¯á¹°á¹±á¹²á¹³á¹´á¹µá¹¶á¹·á¹¸á¹¹á¹ºá¹»á¹¼á¹½á¹¾á¹¿áº€áºáº‚ẃẄẅẆẇẈẉẊẋ +ẌáºáºŽáºáºáº‘ẒẓẔẕẖẗẘẙẚẛẠạẢảẤấẦầẨẩẪẫẬậẮắẰằẲẳẴẵẶặẸẹẺẻẼẽẾếỀá»á»‚ểỄễỆệỈỉỊịỌá»á»Žá»á»á»‘ +ỒồỔổỖỗỘộỚớỜá»á»žá»Ÿá» á»¡á»¢á»£á»¤á»¥á»¦á»§á»¨á»©á»ªá»«á»¬á»­á»®á»¯á»°á»±á»²á»³á»´á»µá»¶á»·á»¸á»¹á¼€á¼á¼‚ἃἄἅἆἇἈἉἊἋἌá¼á¼Žá¼á¼á¼‘ἒἓἔἕἘἙἚἛ +Ἔá¼á¼ á¼¡á¼¢á¼£á¼¤á¼¥á¼¦á¼§á¼¨á¼©á¼ªá¼«á¼¬á¼­á¼®á¼¯á¼°á¼±á¼²á¼³á¼´á¼µá¼¶á¼·á¼¸á¼¹á¼ºá¼»á¼¼á¼½á¼¾á¼¿á½€á½á½‚ὃὄὅὈὉὊὋὌá½á½á½‘ὒὓὔὕὖὗὙὛá½á½Ÿá½ á½¡á½¢á½£á½¤á½¥á½¦á½§ +ὨὩὪὫὬὭὮὯὰάὲέὴήὶίὸόὺύὼώᾀá¾á¾‚ᾃᾄᾅᾆᾇá¾á¾‘ᾒᾓᾔᾕᾖᾗᾠᾡᾢᾣᾤᾥᾦᾧᾰᾱᾲᾳᾴᾶᾷᾸᾹᾺΆιῂῃῄῆῇῈΈῊ 
+á¿‹á¿á¿‘ῒΐῖῗῘῙῚΊῠῡῢΰῤῥῦῧῨῩῪΎῬῲῳῴῶῷῸΌῺΏâ±â¿â„‚ℇℊℋℌâ„â„Žâ„â„ℑℒℓℕℙℚℛℜâ„ℤΩℨKÅℬℭℯℰℱℳℴℹ +ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµºÀÃÂÃÄÅÆÇÈÉÊ +ËÌÃÃŽÃÃÑÒÓÔÕÖØÙÚÛÜÃÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿĀÄĂ㥹ĆćĈĉĊċČÄÄŽ +ÄÄđĒēĔĕĖėĘęĚěĜÄĞğĠġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀÅłŃńŅņŇňʼnŊŋŌÅÅŽÅÅ +őŒœŔŕŖŗŘřŚśŜÅŞşŠšŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſƀÆƂƃƄƅƆƇƈƉƊƋƌÆÆŽÆÆÆ‘Æ’ +ƓƔƕƖƗƘƙƚƛƜÆƞƟƠơƢƣƤƥƦƧƨƩƪƫƬƭƮƯưƱƲƳƴƵƶƷƸƹƺƼƽƾƿDŽdžLJljNJnjÇÇŽÇÇǑǒǓǔǕǖǗǘǙǚǛǜ +ÇǞǟǠǡǢǣǤǥǦǧǨǩǪǫǬǭǮǯǰDZdzǴǵǶǷǸǹǺǻǼǽǾǿȀÈȂȃȄȅȆȇȈȉȊȋȌÈÈŽÈÈȑȒȓȔȕȖȗȘșȚțȜÈȞȟ +ȠȡȢȣȤȥȦȧȨȩȪȫȬȭȮȯȰȱȲȳȴȵȶȷȸȹȺȻȼȽȾȿɀÉÉɑɒɓɔɕɖɗɘəɚɛɜÉɞɟɠɡɢɣɤɥɦɧɨɩɪɫɬɭɮɯ +ɰɱɲɳɴɵɶɷɸɹɺɻɼɽɾɿʀÊʂʃʄʅʆʇʈʉʊʋʌÊÊŽÊÊʑʒʓʔʕʖʗʘʙʚʛʜÊʞʟʠʡʢʣʤʥʦʧʨʩʪʫʬʭʮʯΆΈ +ΉΊΌΎÎÎΑΒΓΔΕΖΗΘΙΚΛΜÎΞΟΠΡΣΤΥΦΧΨΩΪΫάέήίΰαβγδεζηθικλμνξοπÏÏ‚ÏƒÏ„Ï…Ï†Ï‡ÏˆÏ‰ÏŠÏ‹ÏŒÏ +ÏŽÏϑϒϓϔϕϖϗϘϙϚϛϜÏϞϟϠϡϢϣϤϥϦϧϨϩϪϫϬϭϮϯϰϱϲϳϴϵϷϸϹϺϻϼϽϾϿЀÐЂЃЄЅІЇЈЉЊЋЌÐÐŽÐÐБ +ВГДЕЖЗИЙКЛМÐОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрÑтуфхцчшщъыьÑÑŽÑÑÑ‘Ñ’Ñ“ +єѕіїјљњћќÑўџѠѡѢѣѤѥѦѧѨѩѪѫѬѭѮѯѰѱѲѳѴѵѶѷѸѹѺѻѼѽѾѿҀÒÒŠÒ‹ÒŒÒÒŽÒÒÒ‘Ò’Ò“Ò”Ò•Ò–Ò—Ò˜Ò™ÒšÒ›ÒœÒ +ÒžÒŸÒ Ò¡Ò¢Ò£Ò¤Ò¥Ò¦Ò§Ò¨Ò©ÒªÒ«Ò¬Ò­Ò®Ò¯Ò°Ò±Ò²Ò³Ò´ÒµÒ¶Ò·Ò¸Ò¹ÒºÒ»Ò¼Ò½Ò¾Ò¿Ó€ÓÓ‚ÓƒÓ„Ó…Ó†Ó‡ÓˆÓ‰ÓŠÓ‹ÓŒÓÓŽÓÓ‘Ó’Ó“Ó”Ó•Ó–Ó—Ó˜Ó™ÓšÓ›ÓœÓÓžÓŸÓ  +Ó¡Ó¢Ó£Ó¤Ó¥Ó¦Ó§Ó¨Ó©ÓªÓ«Ó¬Ó­Ó®Ó¯Ó°Ó±Ó²Ó³Ó´ÓµÓ¶Ó·Ó¸Ó¹Ô€ÔÔ‚ÔƒÔ„Ô…Ô†Ô‡ÔˆÔ‰ÔŠÔ‹ÔŒÔÔŽÔÔ±Ô²Ô³Ô´ÔµÔ¶Ô·Ô¸Ô¹ÔºÔ»Ô¼Ô½Ô¾Ô¿Õ€ÕÕ‚ÕƒÕ„Õ…Õ†Õ‡ÕˆÕ‰ +ÕŠÕ‹ÕŒÕÕŽÕÕÕ‘Õ’Õ“Õ”Õ•Õ–Õ¡Õ¢Õ£Õ¤Õ¥Õ¦Õ§Õ¨Õ©ÕªÕ«Õ¬Õ­Õ®Õ¯Õ°Õ±Õ²Õ³Õ´ÕµÕ¶Õ·Õ¸Õ¹ÕºÕ»Õ¼Õ½Õ¾Õ¿Ö€ÖւփքօֆևႠႡႢႣႤႥႦႧႨႩႪႫႬႭ +ႮႯႰႱႲႳႴႵႶႷႸႹႺႻႼႽႾႿჀáƒáƒ‚ჃჄჅᴀá´á´‚ᴃᴄᴅᴆᴇᴈᴉᴊᴋᴌá´á´Žá´á´á´‘ᴒᴓᴔᴕᴖᴗᴘᴙᴚᴛᴜá´á´žá´Ÿá´ á´¡á´¢á´£á´¤á´¥á´¦á´§á´¨á´© +ᴪᴫᵢᵣᵤᵥᵦᵧᵨᵩᵪᵫᵬᵭᵮᵯᵰᵱᵲᵳᵴᵵᵶᵷᵹᵺᵻᵼᵽᵾᵿᶀá¶á¶‚ᶃᶄᶅᶆᶇᶈᶉᶊᶋᶌá¶á¶Žá¶á¶á¶‘ᶒᶓᶔᶕᶖᶗᶘᶙᶚḀá¸á¸‚ḃḄḅḆḇ +ḈḉḊḋḌá¸á¸Žá¸á¸á¸‘ḒḓḔḕḖḗḘḙḚḛḜá¸á¸žá¸Ÿá¸ á¸¡á¸¢á¸£á¸¤á¸¥á¸¦á¸§á¸¨á¸©á¸ªá¸«á¸¬á¸­á¸®á¸¯á¸°á¸±á¸²á¸³á¸´á¸µá¸¶á¸·á¸¸á¸¹á¸ºá¸»á¸¼á¸½á¸¾á¸¿á¹€á¹á¹‚ṃṄṅṆṇṈṉ +ṊṋṌá¹á¹Žá¹á¹á¹‘ṒṓṔṕṖṗṘṙṚṛṜá¹á¹žá¹Ÿá¹ á¹¡á¹¢á¹£á¹¤á¹¥á¹¦á¹§á¹¨á¹©á¹ªá¹«á¹¬á¹­á¹®á¹¯á¹°á¹±á¹²á¹³á¹´á¹µá¹¶á¹·á¹¸á¹¹á¹ºá¹»á¹¼á¹½á¹¾á¹¿áº€áºáº‚ẃẄẅẆẇẈẉẊẋ +ẌáºáºŽáºáºáº‘ẒẓẔẕẖẗẘẙẚẛẠạẢảẤấẦầẨẩẪẫẬậẮắẰằẲẳẴẵẶặẸẹẺẻẼẽẾếỀá»á»‚ểỄễỆệỈỉỊịỌá»á»Žá»á»á»‘ +ỒồỔổỖỗỘộỚớỜá»á»žá»Ÿá» 
á»¡á»¢á»£á»¤á»¥á»¦á»§á»¨á»©á»ªá»«á»¬á»­á»®á»¯á»°á»±á»²á»³á»´á»µá»¶á»·á»¸á»¹á¼€á¼á¼‚ἃἄἅἆἇἈἉἊἋἌá¼á¼Žá¼á¼á¼‘ἒἓἔἕἘἙἚἛ +Ἔá¼á¼ á¼¡á¼¢á¼£á¼¤á¼¥á¼¦á¼§á¼¨á¼©á¼ªá¼«á¼¬á¼­á¼®á¼¯á¼°á¼±á¼²á¼³á¼´á¼µá¼¶á¼·á¼¸á¼¹á¼ºá¼»á¼¼á¼½á¼¾á¼¿á½€á½á½‚ὃὄὅὈὉὊὋὌá½á½á½‘ὒὓὔὕὖὗὙὛá½á½Ÿá½ á½¡á½¢á½£á½¤á½¥á½¦á½§ +ὨὩὪὫὬὭὮὯὰάὲέὴήὶίὸόὺύὼώᾀá¾á¾‚ᾃᾄᾅᾆᾇá¾á¾‘ᾒᾓᾔᾕᾖᾗᾠᾡᾢᾣᾤᾥᾦᾧᾰᾱᾲᾳᾴᾶᾷᾸᾹᾺΆιῂῃῄῆῇῈΈῊ +á¿‹á¿á¿‘ῒΐῖῗῘῙῚΊῠῡῢΰῤῥῦῧῨῩῪΎῬῲῳῴῶῷῸΌῺΏâ±â¿â„‚ℇℊℋℌâ„â„Žâ„â„ℑℒℓℕℙℚℛℜâ„ℤΩℨKÅℬℭℯℰℱℳℴℹ diff --git a/_test/data/mappings.events b/_test/data/mappings.events new file mode 100644 index 0000000..3cb5579 --- /dev/null +++ b/_test/data/mappings.events @@ -0,0 +1,44 @@ +- !StreamStart + +- !DocumentStart +- !MappingStart +- !Scalar { implicit: [true,true], value: 'key' } +- !Scalar { implicit: [true,true], value: 'value' } +- !Scalar { implicit: [true,true], value: 'empty mapping' } +- !MappingStart +- !MappingEnd +- !Scalar { implicit: [true,true], value: 'empty mapping with tag' } +- !MappingStart { tag: '!mytag', implicit: false } +- !MappingEnd +- !Scalar { implicit: [true,true], value: 'block mapping' } +- !MappingStart +- !MappingStart +- !Scalar { implicit: [true,true], value: 'complex' } +- !Scalar { implicit: [true,true], value: 'key' } +- !Scalar { implicit: [true,true], value: 'complex' } +- !Scalar { implicit: [true,true], value: 'key' } +- !MappingEnd +- !MappingStart +- !Scalar { implicit: [true,true], value: 'complex' } +- !Scalar { implicit: [true,true], value: 'key' } +- !MappingEnd +- !MappingEnd +- !Scalar { implicit: [true,true], value: 'flow mapping' } +- !MappingStart { flow_style: true } +- !Scalar { implicit: [true,true], value: 'key' } +- !Scalar { implicit: [true,true], value: 'value' } +- !MappingStart +- !Scalar { implicit: [true,true], value: 'complex' } +- !Scalar { implicit: [true,true], value: 'key' } +- !Scalar { implicit: [true,true], value: 'complex' } +- !Scalar { implicit: [true,true], value: 'key' } +- !MappingEnd +- !MappingStart +- !Scalar { implicit: [true,true], value: 'complex' } +- !Scalar 
{ implicit: [true,true], value: 'key' } +- !MappingEnd +- !MappingEnd +- !MappingEnd +- !DocumentEnd + +- !StreamEnd diff --git a/_test/data/merge.data b/_test/data/merge.data new file mode 100644 index 0000000..e455bbc --- /dev/null +++ b/_test/data/merge.data @@ -0,0 +1 @@ +- << diff --git a/_test/data/merge.detect b/_test/data/merge.detect new file mode 100644 index 0000000..1672d0d --- /dev/null +++ b/_test/data/merge.detect @@ -0,0 +1 @@ +tag:yaml.org,2002:merge diff --git a/_test/data/more-floats.code b/_test/data/more-floats.code new file mode 100644 index 0000000..e3e444e --- /dev/null +++ b/_test/data/more-floats.code @@ -0,0 +1 @@ +[0.0, +1.0, -1.0, +1e300000, -1e300000, 1e300000/1e300000, -(1e300000/1e300000)] # last two items are ind and qnan respectively. diff --git a/_test/data/more-floats.data b/_test/data/more-floats.data new file mode 100644 index 0000000..399eb17 --- /dev/null +++ b/_test/data/more-floats.data @@ -0,0 +1 @@ +[0.0, +1.0, -1.0, +.inf, -.inf, .nan, .nan] diff --git a/_test/data/negative-float-bug.code b/_test/data/negative-float-bug.code new file mode 100644 index 0000000..18e16e3 --- /dev/null +++ b/_test/data/negative-float-bug.code @@ -0,0 +1 @@ +-1.0 diff --git a/_test/data/negative-float-bug.data b/_test/data/negative-float-bug.data new file mode 100644 index 0000000..18e16e3 --- /dev/null +++ b/_test/data/negative-float-bug.data @@ -0,0 +1 @@ +-1.0 diff --git a/_test/data/no-alias-anchor.emitter-error b/_test/data/no-alias-anchor.emitter-error new file mode 100644 index 0000000..5ff065c --- /dev/null +++ b/_test/data/no-alias-anchor.emitter-error @@ -0,0 +1,8 @@ +- !StreamStart +- !DocumentStart +- !SequenceStart +- !Scalar { anchor: A, value: data } +- !Alias { } +- !SequenceEnd +- !DocumentEnd +- !StreamEnd diff --git a/_test/data/no-alias-anchor.skip-ext b/_test/data/no-alias-anchor.skip-ext new file mode 100644 index 0000000..e69de29 diff --git a/_test/data/no-block-collection-end.loader-error 
b/_test/data/no-block-collection-end.loader-error new file mode 100644 index 0000000..02d4d37 --- /dev/null +++ b/_test/data/no-block-collection-end.loader-error @@ -0,0 +1,3 @@ +- foo +- bar +baz: bar diff --git a/_test/data/no-block-mapping-end-2.loader-error b/_test/data/no-block-mapping-end-2.loader-error new file mode 100644 index 0000000..be63571 --- /dev/null +++ b/_test/data/no-block-mapping-end-2.loader-error @@ -0,0 +1,3 @@ +? foo +: bar +: baz diff --git a/_test/data/no-block-mapping-end.loader-error b/_test/data/no-block-mapping-end.loader-error new file mode 100644 index 0000000..1ea921c --- /dev/null +++ b/_test/data/no-block-mapping-end.loader-error @@ -0,0 +1 @@ +foo: "bar" "baz" diff --git a/_test/data/no-document-start.loader-error b/_test/data/no-document-start.loader-error new file mode 100644 index 0000000..c725ec8 --- /dev/null +++ b/_test/data/no-document-start.loader-error @@ -0,0 +1,3 @@ +%YAML 1.1 +# no --- +foo: bar diff --git a/_test/data/no-flow-mapping-end.loader-error b/_test/data/no-flow-mapping-end.loader-error new file mode 100644 index 0000000..8bd1403 --- /dev/null +++ b/_test/data/no-flow-mapping-end.loader-error @@ -0,0 +1 @@ +{ foo: bar ] diff --git a/_test/data/no-flow-sequence-end.loader-error b/_test/data/no-flow-sequence-end.loader-error new file mode 100644 index 0000000..750d973 --- /dev/null +++ b/_test/data/no-flow-sequence-end.loader-error @@ -0,0 +1 @@ +[foo, bar} diff --git a/_test/data/no-node-1.loader-error b/_test/data/no-node-1.loader-error new file mode 100644 index 0000000..07b1500 --- /dev/null +++ b/_test/data/no-node-1.loader-error @@ -0,0 +1 @@ +- !foo ] diff --git a/_test/data/no-node-2.loader-error b/_test/data/no-node-2.loader-error new file mode 100644 index 0000000..563e3b3 --- /dev/null +++ b/_test/data/no-node-2.loader-error @@ -0,0 +1 @@ +- [ !foo } ] diff --git a/_test/data/no-tag.emitter-error b/_test/data/no-tag.emitter-error new file mode 100644 index 0000000..384c62f --- /dev/null +++ 
b/_test/data/no-tag.emitter-error @@ -0,0 +1,5 @@ +- !StreamStart +- !DocumentStart +- !Scalar { value: 'foo', implicit: [false,false] } +- !DocumentEnd +- !StreamEnd diff --git a/_test/data/null.data b/_test/data/null.data new file mode 100644 index 0000000..ad12528 --- /dev/null +++ b/_test/data/null.data @@ -0,0 +1,3 @@ +- +- ~ +- null diff --git a/_test/data/null.detect b/_test/data/null.detect new file mode 100644 index 0000000..19110c7 --- /dev/null +++ b/_test/data/null.detect @@ -0,0 +1 @@ +tag:yaml.org,2002:null diff --git a/_test/data/odd-utf16.stream-error b/_test/data/odd-utf16.stream-error new file mode 100644 index 0000000..b59e434 Binary files /dev/null and b/_test/data/odd-utf16.stream-error differ diff --git a/_test/data/omap.data b/_test/data/omap.data new file mode 100644 index 0000000..b366fbc --- /dev/null +++ b/_test/data/omap.data @@ -0,0 +1,8 @@ +Bestiary: !!omap +- aardvark: African pig-like ant eater. Ugly. +- anteater: South-American ant eater. Two species. +- anaconda: South-American constrictor snake. Scaly. 
+Numbers: !!omap +- one: 1 +- two: 2 +- three: 3 diff --git a/_test/data/omap.roundtrip b/_test/data/omap.roundtrip new file mode 100644 index 0000000..e69de29 diff --git a/_test/data/recursive-anchor.former-loader-error b/_test/data/recursive-anchor.former-loader-error new file mode 100644 index 0000000..661166c --- /dev/null +++ b/_test/data/recursive-anchor.former-loader-error @@ -0,0 +1,4 @@ +- &foo [1 + 2, + 3, + *foo] diff --git a/_test/data/recursive-dict.recursive b/_test/data/recursive-dict.recursive new file mode 100644 index 0000000..8f326f5 --- /dev/null +++ b/_test/data/recursive-dict.recursive @@ -0,0 +1,3 @@ +value = {} +instance = AnInstance(value, value) +value[instance] = instance diff --git a/_test/data/recursive-list.recursive b/_test/data/recursive-list.recursive new file mode 100644 index 0000000..27a4ae5 --- /dev/null +++ b/_test/data/recursive-list.recursive @@ -0,0 +1,2 @@ +value = [] +value.append(value) diff --git a/_test/data/recursive-set.recursive b/_test/data/recursive-set.recursive new file mode 100644 index 0000000..457c50d --- /dev/null +++ b/_test/data/recursive-set.recursive @@ -0,0 +1,7 @@ +try: + set +except NameError: + from sets import Set as set +value = set() +value.add(AnInstance(foo=value, bar=value)) +value.add(AnInstance(foo=value, bar=value)) diff --git a/_test/data/recursive-state.recursive b/_test/data/recursive-state.recursive new file mode 100644 index 0000000..bffe61e --- /dev/null +++ b/_test/data/recursive-state.recursive @@ -0,0 +1,2 @@ +value = [] +value.append(AnInstanceWithState(value, value)) diff --git a/_test/data/recursive-tuple.recursive b/_test/data/recursive-tuple.recursive new file mode 100644 index 0000000..dc08d02 --- /dev/null +++ b/_test/data/recursive-tuple.recursive @@ -0,0 +1,3 @@ +value = ([], []) +value[0].append(value) +value[1].append(value[0]) diff --git a/_test/data/recursive.former-dumper-error b/_test/data/recursive.former-dumper-error new file mode 100644 index 0000000..3c7cc2f --- 
/dev/null +++ b/_test/data/recursive.former-dumper-error @@ -0,0 +1,3 @@ +data = [] +data.append(data) +dump(data) diff --git a/_test/data/remove-possible-simple-key-bug.loader-error b/_test/data/remove-possible-simple-key-bug.loader-error new file mode 100644 index 0000000..fe1bc6c --- /dev/null +++ b/_test/data/remove-possible-simple-key-bug.loader-error @@ -0,0 +1,3 @@ +foo: &A bar +*A ] # The ']' indicator triggers remove_possible_simple_key, + # which should raise an error. diff --git a/_test/data/resolver.data b/_test/data/resolver.data new file mode 100644 index 0000000..a296404 --- /dev/null +++ b/_test/data/resolver.data @@ -0,0 +1,30 @@ +--- +"this scalar should be selected" +--- +key11: !foo + key12: + is: [selected] + key22: + key13: [not, selected] + key23: [not, selected] + key32: + key31: [not, selected] + key32: [not, selected] + key33: {not: selected} +key21: !bar + - not selected + - selected + - not selected +key31: !baz + key12: + key13: + key14: {selected} + key23: + key14: [not, selected] + key33: + key14: {selected} + key24: {not: selected} + key22: + - key14: {selected} + key24: {not: selected} + - key14: {selected} diff --git a/_test/data/resolver.path b/_test/data/resolver.path new file mode 100644 index 0000000..ec677d2 --- /dev/null +++ b/_test/data/resolver.path @@ -0,0 +1,30 @@ +--- !root/scalar +"this scalar should be selected" +--- !root +key11: !foo + key12: !root/key11/key12/* + is: [selected] + key22: + key13: [not, selected] + key23: [not, selected] + key32: + key31: [not, selected] + key32: [not, selected] + key33: {not: selected} +key21: !bar + - not selected + - !root/key21/1/* selected + - not selected +key31: !baz + key12: + key13: + key14: !root/key31/*/*/key14/map {selected} + key23: + key14: [not, selected] + key33: + key14: !root/key31/*/*/key14/map {selected} + key24: {not: selected} + key22: + - key14: !root/key31/*/*/key14/map {selected} + key24: {not: selected} + - key14: !root/key31/*/*/key14/map {selected} diff 
--git a/_test/data/run-parser-crash-bug.data b/_test/data/run-parser-crash-bug.data new file mode 100644 index 0000000..fe01734 --- /dev/null +++ b/_test/data/run-parser-crash-bug.data @@ -0,0 +1,8 @@ +--- +- Harry Potter and the Prisoner of Azkaban +- Harry Potter and the Goblet of Fire +- Harry Potter and the Order of the Phoenix +--- +- Memoirs Found in a Bathtub +- Snow Crash +- Ghost World diff --git a/_test/data/scalars.events b/_test/data/scalars.events new file mode 100644 index 0000000..32c40f4 --- /dev/null +++ b/_test/data/scalars.events @@ -0,0 +1,28 @@ +- !StreamStart + +- !DocumentStart +- !MappingStart +- !Scalar { implicit: [true,true], value: 'empty scalar' } +- !Scalar { implicit: [true,false], value: '' } +- !Scalar { implicit: [true,true], value: 'implicit scalar' } +- !Scalar { implicit: [true,true], value: 'data' } +- !Scalar { implicit: [true,true], value: 'quoted scalar' } +- !Scalar { value: 'data', style: '"' } +- !Scalar { implicit: [true,true], value: 'block scalar' } +- !Scalar { value: 'data', style: '|' } +- !Scalar { implicit: [true,true], value: 'empty scalar with tag' } +- !Scalar { implicit: [false,false], tag: '!mytag', value: '' } +- !Scalar { implicit: [true,true], value: 'implicit scalar with tag' } +- !Scalar { implicit: [false,false], tag: '!mytag', value: 'data' } +- !Scalar { implicit: [true,true], value: 'quoted scalar with tag' } +- !Scalar { value: 'data', style: '"', tag: '!mytag', implicit: [false,false] } +- !Scalar { implicit: [true,true], value: 'block scalar with tag' } +- !Scalar { value: 'data', style: '|', tag: '!mytag', implicit: [false,false] } +- !Scalar { implicit: [true,true], value: 'single character' } +- !Scalar { value: 'a', implicit: [true,true] } +- !Scalar { implicit: [true,true], value: 'single digit' } +- !Scalar { value: '1', implicit: [true,false] } +- !MappingEnd +- !DocumentEnd + +- !StreamEnd diff --git a/_test/data/scan-document-end-bug.canonical b/_test/data/scan-document-end-bug.canonical 
new file mode 100644 index 0000000..4a0e8a8 --- /dev/null +++ b/_test/data/scan-document-end-bug.canonical @@ -0,0 +1,3 @@ +%YAML 1.1 +--- +!!null "" diff --git a/_test/data/scan-document-end-bug.data b/_test/data/scan-document-end-bug.data new file mode 100644 index 0000000..3c70543 --- /dev/null +++ b/_test/data/scan-document-end-bug.data @@ -0,0 +1,3 @@ +# Ticket #4 +--- +... \ No newline at end of file diff --git a/_test/data/scan-line-break-bug.canonical b/_test/data/scan-line-break-bug.canonical new file mode 100644 index 0000000..79f08b7 --- /dev/null +++ b/_test/data/scan-line-break-bug.canonical @@ -0,0 +1,3 @@ +%YAML 1.1 +--- +!!map { ? !!str "foo" : !!str "bar baz" } diff --git a/_test/data/scan-line-break-bug.data b/_test/data/scan-line-break-bug.data new file mode 100644 index 0000000..c974fab --- /dev/null +++ b/_test/data/scan-line-break-bug.data @@ -0,0 +1,3 @@ +foo: + bar + baz diff --git a/_test/data/sequences.events b/_test/data/sequences.events new file mode 100644 index 0000000..692a329 --- /dev/null +++ b/_test/data/sequences.events @@ -0,0 +1,81 @@ +- !StreamStart + +- !DocumentStart +- !SequenceStart +- !SequenceEnd +- !DocumentEnd + +- !DocumentStart +- !SequenceStart { tag: '!mytag', implicit: false } +- !SequenceEnd +- !DocumentEnd + +- !DocumentStart +- !SequenceStart +- !SequenceStart +- !SequenceEnd +- !SequenceStart { tag: '!mytag', implicit: false } +- !SequenceEnd +- !SequenceStart +- !Scalar +- !Scalar { value: 'data' } +- !Scalar { tag: '!mytag', implicit: [false,false], value: 'data' } +- !SequenceEnd +- !SequenceStart +- !SequenceStart +- !SequenceStart +- !Scalar +- !SequenceEnd +- !SequenceEnd +- !SequenceEnd +- !SequenceStart +- !SequenceStart { tag: '!mytag', implicit: false } +- !SequenceStart +- !Scalar { value: 'data' } +- !SequenceEnd +- !SequenceEnd +- !SequenceEnd +- !SequenceEnd +- !DocumentEnd + +- !DocumentStart +- !SequenceStart +- !MappingStart +- !Scalar { value: 'key1' } +- !SequenceStart +- !Scalar { value: 
'data1' } +- !Scalar { value: 'data2' } +- !SequenceEnd +- !Scalar { value: 'key2' } +- !SequenceStart { tag: '!mytag1', implicit: false } +- !Scalar { value: 'data3' } +- !SequenceStart +- !Scalar { value: 'data4' } +- !Scalar { value: 'data5' } +- !SequenceEnd +- !SequenceStart { tag: '!mytag2', implicit: false } +- !Scalar { value: 'data6' } +- !Scalar { value: 'data7' } +- !SequenceEnd +- !SequenceEnd +- !MappingEnd +- !SequenceEnd +- !DocumentEnd + +- !DocumentStart +- !SequenceStart +- !SequenceStart { flow_style: true } +- !SequenceStart +- !SequenceEnd +- !Scalar +- !Scalar { value: 'data' } +- !Scalar { tag: '!mytag', implicit: [false,false], value: 'data' } +- !SequenceStart { tag: '!mytag', implicit: false } +- !Scalar { value: 'data' } +- !Scalar { value: 'data' } +- !SequenceEnd +- !SequenceEnd +- !SequenceEnd +- !DocumentEnd + +- !StreamEnd diff --git a/_test/data/serializer-is-already-opened.dumper-error b/_test/data/serializer-is-already-opened.dumper-error new file mode 100644 index 0000000..9a23525 --- /dev/null +++ b/_test/data/serializer-is-already-opened.dumper-error @@ -0,0 +1,3 @@ +dumper = yaml.Dumper(StringIO()) +dumper.open() +dumper.open() diff --git a/_test/data/serializer-is-closed-1.dumper-error b/_test/data/serializer-is-closed-1.dumper-error new file mode 100644 index 0000000..8e7e600 --- /dev/null +++ b/_test/data/serializer-is-closed-1.dumper-error @@ -0,0 +1,4 @@ +dumper = yaml.Dumper(StringIO()) +dumper.open() +dumper.close() +dumper.open() diff --git a/_test/data/serializer-is-closed-2.dumper-error b/_test/data/serializer-is-closed-2.dumper-error new file mode 100644 index 0000000..89aef7e --- /dev/null +++ b/_test/data/serializer-is-closed-2.dumper-error @@ -0,0 +1,4 @@ +dumper = yaml.Dumper(StringIO()) +dumper.open() +dumper.close() +dumper.serialize(yaml.ScalarNode(tag='!foo', value='bar')) diff --git a/_test/data/serializer-is-not-opened-1.dumper-error b/_test/data/serializer-is-not-opened-1.dumper-error new file mode 100644 
index 0000000..8f22e73 --- /dev/null +++ b/_test/data/serializer-is-not-opened-1.dumper-error @@ -0,0 +1,2 @@ +dumper = yaml.Dumper(StringIO()) +dumper.close() diff --git a/_test/data/serializer-is-not-opened-2.dumper-error b/_test/data/serializer-is-not-opened-2.dumper-error new file mode 100644 index 0000000..ebd9df1 --- /dev/null +++ b/_test/data/serializer-is-not-opened-2.dumper-error @@ -0,0 +1,2 @@ +dumper = yaml.Dumper(StringIO()) +dumper.serialize(yaml.ScalarNode(tag='!foo', value='bar')) diff --git a/_test/data/single-dot-is-not-float-bug.code b/_test/data/single-dot-is-not-float-bug.code new file mode 100644 index 0000000..dcd0c2f --- /dev/null +++ b/_test/data/single-dot-is-not-float-bug.code @@ -0,0 +1 @@ +'.' diff --git a/_test/data/single-dot-is-not-float-bug.data b/_test/data/single-dot-is-not-float-bug.data new file mode 100644 index 0000000..9c558e3 --- /dev/null +++ b/_test/data/single-dot-is-not-float-bug.data @@ -0,0 +1 @@ +. diff --git a/_test/data/sloppy-indentation.canonical b/_test/data/sloppy-indentation.canonical new file mode 100644 index 0000000..438bc04 --- /dev/null +++ b/_test/data/sloppy-indentation.canonical @@ -0,0 +1,18 @@ +%YAML 1.1 +--- +!!map { + ? !!str "in the block context" + : !!map { + ? !!str "indentation should be kept" + : !!map { + ? !!str "but in the flow context" + : !!seq [ !!str "it may be violated" ] + } + } +} +--- !!str +"the parser does not require scalars to be indented with at least one space" +--- !!str +"the parser does not require scalars to be indented with at least one space" +--- !!map +{ ? !!str "foo": { ? 
!!str "bar" : !!str "quoted scalars may not adhere indentation" } } diff --git a/_test/data/sloppy-indentation.data b/_test/data/sloppy-indentation.data new file mode 100644 index 0000000..2eb4f5a --- /dev/null +++ b/_test/data/sloppy-indentation.data @@ -0,0 +1,17 @@ +--- +in the block context: + indentation should be kept: { + but in the flow context: [ +it may be violated] +} +--- +the parser does not require scalars +to be indented with at least one space +... +--- +"the parser does not require scalars +to be indented with at least one space" +--- +foo: + bar: 'quoted scalars +may not adhere indentation' diff --git a/_test/data/spec-02-01.code b/_test/data/spec-02-01.code new file mode 100644 index 0000000..0e927a3 --- /dev/null +++ b/_test/data/spec-02-01.code @@ -0,0 +1 @@ +['Mark McGwire', 'Sammy Sosa', 'Ken Griffey'] diff --git a/_test/data/spec-02-01.data b/_test/data/spec-02-01.data new file mode 100644 index 0000000..d12e671 --- /dev/null +++ b/_test/data/spec-02-01.data @@ -0,0 +1,3 @@ +- Mark McGwire +- Sammy Sosa +- Ken Griffey diff --git a/_test/data/spec-02-01.structure b/_test/data/spec-02-01.structure new file mode 100644 index 0000000..f532f4a --- /dev/null +++ b/_test/data/spec-02-01.structure @@ -0,0 +1 @@ +[True, True, True] diff --git a/_test/data/spec-02-01.tokens b/_test/data/spec-02-01.tokens new file mode 100644 index 0000000..ce44cac --- /dev/null +++ b/_test/data/spec-02-01.tokens @@ -0,0 +1 @@ +[[ , _ , _ , _ ]} diff --git a/_test/data/spec-02-02.data b/_test/data/spec-02-02.data new file mode 100644 index 0000000..7b7ec94 --- /dev/null +++ b/_test/data/spec-02-02.data @@ -0,0 +1,3 @@ +hr: 65 # Home runs +avg: 0.278 # Batting average +rbi: 147 # Runs Batted In diff --git a/_test/data/spec-02-02.structure b/_test/data/spec-02-02.structure new file mode 100644 index 0000000..aba1ced --- /dev/null +++ b/_test/data/spec-02-02.structure @@ -0,0 +1 @@ +[(True, True), (True, True), (True, True)] diff --git a/_test/data/spec-02-02.tokens 
b/_test/data/spec-02-02.tokens new file mode 100644 index 0000000..e4e381b --- /dev/null +++ b/_test/data/spec-02-02.tokens @@ -0,0 +1,5 @@ +{{ +? _ : _ +? _ : _ +? _ : _ +]} diff --git a/_test/data/spec-02-03.data b/_test/data/spec-02-03.data new file mode 100644 index 0000000..656d628 --- /dev/null +++ b/_test/data/spec-02-03.data @@ -0,0 +1,8 @@ +american: + - Boston Red Sox + - Detroit Tigers + - New York Yankees +national: + - New York Mets + - Chicago Cubs + - Atlanta Braves diff --git a/_test/data/spec-02-03.structure b/_test/data/spec-02-03.structure new file mode 100644 index 0000000..25de5d2 --- /dev/null +++ b/_test/data/spec-02-03.structure @@ -0,0 +1 @@ +[(True, [True, True, True]), (True, [True, True, True])] diff --git a/_test/data/spec-02-03.tokens b/_test/data/spec-02-03.tokens new file mode 100644 index 0000000..89815f2 --- /dev/null +++ b/_test/data/spec-02-03.tokens @@ -0,0 +1,4 @@ +{{ +? _ : [[ , _ , _ , _ ]} +? _ : [[ , _ , _ , _ ]} +]} diff --git a/_test/data/spec-02-04.data b/_test/data/spec-02-04.data new file mode 100644 index 0000000..430f6b3 --- /dev/null +++ b/_test/data/spec-02-04.data @@ -0,0 +1,8 @@ +- + name: Mark McGwire + hr: 65 + avg: 0.278 +- + name: Sammy Sosa + hr: 63 + avg: 0.288 diff --git a/_test/data/spec-02-04.structure b/_test/data/spec-02-04.structure new file mode 100644 index 0000000..e7b526c --- /dev/null +++ b/_test/data/spec-02-04.structure @@ -0,0 +1,4 @@ +[ + [(True, True), (True, True), (True, True)], + [(True, True), (True, True), (True, True)], +] diff --git a/_test/data/spec-02-04.tokens b/_test/data/spec-02-04.tokens new file mode 100644 index 0000000..9cb9815 --- /dev/null +++ b/_test/data/spec-02-04.tokens @@ -0,0 +1,4 @@ +[[ +, {{ ? _ : _ ? _ : _ ? _ : _ ]} +, {{ ? _ : _ ? _ : _ ? 
_ : _ ]} +]} diff --git a/_test/data/spec-02-05.data b/_test/data/spec-02-05.data new file mode 100644 index 0000000..cdd7770 --- /dev/null +++ b/_test/data/spec-02-05.data @@ -0,0 +1,3 @@ +- [name , hr, avg ] +- [Mark McGwire, 65, 0.278] +- [Sammy Sosa , 63, 0.288] diff --git a/_test/data/spec-02-05.structure b/_test/data/spec-02-05.structure new file mode 100644 index 0000000..e06b75a --- /dev/null +++ b/_test/data/spec-02-05.structure @@ -0,0 +1,5 @@ +[ + [True, True, True], + [True, True, True], + [True, True, True], +] diff --git a/_test/data/spec-02-05.tokens b/_test/data/spec-02-05.tokens new file mode 100644 index 0000000..3f6f1ab --- /dev/null +++ b/_test/data/spec-02-05.tokens @@ -0,0 +1,5 @@ +[[ +, [ _ , _ , _ ] +, [ _ , _ , _ ] +, [ _ , _ , _ ] +]} diff --git a/_test/data/spec-02-06.data b/_test/data/spec-02-06.data new file mode 100644 index 0000000..7a957b2 --- /dev/null +++ b/_test/data/spec-02-06.data @@ -0,0 +1,5 @@ +Mark McGwire: {hr: 65, avg: 0.278} +Sammy Sosa: { + hr: 63, + avg: 0.288 + } diff --git a/_test/data/spec-02-06.structure b/_test/data/spec-02-06.structure new file mode 100644 index 0000000..3ef0f4b --- /dev/null +++ b/_test/data/spec-02-06.structure @@ -0,0 +1,4 @@ +[ + (True, [(True, True), (True, True)]), + (True, [(True, True), (True, True)]), +] diff --git a/_test/data/spec-02-06.tokens b/_test/data/spec-02-06.tokens new file mode 100644 index 0000000..a1a5eef --- /dev/null +++ b/_test/data/spec-02-06.tokens @@ -0,0 +1,4 @@ +{{ +? _ : { ? _ : _ , ? _ : _ } +? _ : { ? _ : _ , ? 
_ : _ } +]} diff --git a/_test/data/spec-02-07.data b/_test/data/spec-02-07.data new file mode 100644 index 0000000..bc711d5 --- /dev/null +++ b/_test/data/spec-02-07.data @@ -0,0 +1,10 @@ +# Ranking of 1998 home runs +--- +- Mark McGwire +- Sammy Sosa +- Ken Griffey + +# Team ranking +--- +- Chicago Cubs +- St Louis Cardinals diff --git a/_test/data/spec-02-07.structure b/_test/data/spec-02-07.structure new file mode 100644 index 0000000..c5d72a3 --- /dev/null +++ b/_test/data/spec-02-07.structure @@ -0,0 +1,4 @@ +[ +[True, True, True], +[True, True], +] diff --git a/_test/data/spec-02-07.tokens b/_test/data/spec-02-07.tokens new file mode 100644 index 0000000..ed48883 --- /dev/null +++ b/_test/data/spec-02-07.tokens @@ -0,0 +1,12 @@ +--- +[[ +, _ +, _ +, _ +]} + +--- +[[ +, _ +, _ +]} diff --git a/_test/data/spec-02-08.data b/_test/data/spec-02-08.data new file mode 100644 index 0000000..05e102d --- /dev/null +++ b/_test/data/spec-02-08.data @@ -0,0 +1,10 @@ +--- +time: 20:03:20 +player: Sammy Sosa +action: strike (miss) +... +--- +time: 20:03:47 +player: Sammy Sosa +action: grand slam +... diff --git a/_test/data/spec-02-08.structure b/_test/data/spec-02-08.structure new file mode 100644 index 0000000..24cff73 --- /dev/null +++ b/_test/data/spec-02-08.structure @@ -0,0 +1,4 @@ +[ +[(True, True), (True, True), (True, True)], +[(True, True), (True, True), (True, True)], +] diff --git a/_test/data/spec-02-08.tokens b/_test/data/spec-02-08.tokens new file mode 100644 index 0000000..7d2c03d --- /dev/null +++ b/_test/data/spec-02-08.tokens @@ -0,0 +1,15 @@ +--- +{{ +? _ : _ +? _ : _ +? _ : _ +]} +... + +--- +{{ +? _ : _ +? _ : _ +? _ : _ +]} +... 
diff --git a/_test/data/spec-02-09.data b/_test/data/spec-02-09.data new file mode 100644 index 0000000..e264180 --- /dev/null +++ b/_test/data/spec-02-09.data @@ -0,0 +1,8 @@ +--- +hr: # 1998 hr ranking + - Mark McGwire + - Sammy Sosa +rbi: + # 1998 rbi ranking + - Sammy Sosa + - Ken Griffey diff --git a/_test/data/spec-02-09.structure b/_test/data/spec-02-09.structure new file mode 100644 index 0000000..b4c9914 --- /dev/null +++ b/_test/data/spec-02-09.structure @@ -0,0 +1 @@ +[(True, [True, True]), (True, [True, True])] diff --git a/_test/data/spec-02-09.tokens b/_test/data/spec-02-09.tokens new file mode 100644 index 0000000..b2ec10e --- /dev/null +++ b/_test/data/spec-02-09.tokens @@ -0,0 +1,5 @@ +--- +{{ +? _ : [[ , _ , _ ]} +? _ : [[ , _ , _ ]} +]} diff --git a/_test/data/spec-02-10.data b/_test/data/spec-02-10.data new file mode 100644 index 0000000..61808f6 --- /dev/null +++ b/_test/data/spec-02-10.data @@ -0,0 +1,8 @@ +--- +hr: + - Mark McGwire + # Following node labeled SS + - &SS Sammy Sosa +rbi: + - *SS # Subsequent occurrence + - Ken Griffey diff --git a/_test/data/spec-02-10.structure b/_test/data/spec-02-10.structure new file mode 100644 index 0000000..ff8f4c3 --- /dev/null +++ b/_test/data/spec-02-10.structure @@ -0,0 +1 @@ +[(True, [True, True]), (True, ['*', True])] diff --git a/_test/data/spec-02-10.tokens b/_test/data/spec-02-10.tokens new file mode 100644 index 0000000..26caa2b --- /dev/null +++ b/_test/data/spec-02-10.tokens @@ -0,0 +1,5 @@ +--- +{{ +? _ : [[ , _ , & _ ]} +? 
_ : [[ , * , _ ]} +]} diff --git a/_test/data/spec-02-11.code b/_test/data/spec-02-11.code new file mode 100644 index 0000000..6e02325 --- /dev/null +++ b/_test/data/spec-02-11.code @@ -0,0 +1,10 @@ +{ +('Detroit Tigers', 'Chicago cubs'): [datetime.date(2001, 7, 23)], + +('New York Yankees', 'Atlanta Braves'): + [datetime.date(2001, 7, 2), + datetime.date(2001, 8, 12), + datetime.date(2001, 8, 14)] +} + + diff --git a/_test/data/spec-02-11.data b/_test/data/spec-02-11.data new file mode 100644 index 0000000..9123ce2 --- /dev/null +++ b/_test/data/spec-02-11.data @@ -0,0 +1,9 @@ +? - Detroit Tigers + - Chicago cubs +: + - 2001-07-23 + +? [ New York Yankees, + Atlanta Braves ] +: [ 2001-07-02, 2001-08-12, + 2001-08-14 ] diff --git a/_test/data/spec-02-11.structure b/_test/data/spec-02-11.structure new file mode 100644 index 0000000..3d8f1ff --- /dev/null +++ b/_test/data/spec-02-11.structure @@ -0,0 +1,4 @@ +[ +([True, True], [True]), +([True, True], [True, True, True]), +] diff --git a/_test/data/spec-02-11.tokens b/_test/data/spec-02-11.tokens new file mode 100644 index 0000000..fe24203 --- /dev/null +++ b/_test/data/spec-02-11.tokens @@ -0,0 +1,6 @@ +{{ +? [[ , _ , _ ]} +: [[ , _ ]} +? 
[ _ , _ ] +: [ _ , _ , _ ] +]} diff --git a/_test/data/spec-02-12.data b/_test/data/spec-02-12.data new file mode 100644 index 0000000..1fc33f9 --- /dev/null +++ b/_test/data/spec-02-12.data @@ -0,0 +1,8 @@ +--- +# products purchased +- item : Super Hoop + quantity: 1 +- item : Basketball + quantity: 4 +- item : Big Shoes + quantity: 1 diff --git a/_test/data/spec-02-12.structure b/_test/data/spec-02-12.structure new file mode 100644 index 0000000..e9c5359 --- /dev/null +++ b/_test/data/spec-02-12.structure @@ -0,0 +1,5 @@ +[ +[(True, True), (True, True)], +[(True, True), (True, True)], +[(True, True), (True, True)], +] diff --git a/_test/data/spec-02-12.tokens b/_test/data/spec-02-12.tokens new file mode 100644 index 0000000..ea21e50 --- /dev/null +++ b/_test/data/spec-02-12.tokens @@ -0,0 +1,6 @@ +--- +[[ +, {{ ? _ : _ ? _ : _ ]} +, {{ ? _ : _ ? _ : _ ]} +, {{ ? _ : _ ? _ : _ ]} +]} diff --git a/_test/data/spec-02-13.data b/_test/data/spec-02-13.data new file mode 100644 index 0000000..13fb656 --- /dev/null +++ b/_test/data/spec-02-13.data @@ -0,0 +1,4 @@ +# ASCII Art +--- | + \//||\/|| + // || ||__ diff --git a/_test/data/spec-02-13.structure b/_test/data/spec-02-13.structure new file mode 100644 index 0000000..0ca9514 --- /dev/null +++ b/_test/data/spec-02-13.structure @@ -0,0 +1 @@ +True diff --git a/_test/data/spec-02-13.tokens b/_test/data/spec-02-13.tokens new file mode 100644 index 0000000..7456c05 --- /dev/null +++ b/_test/data/spec-02-13.tokens @@ -0,0 +1 @@ +--- _ diff --git a/_test/data/spec-02-14.data b/_test/data/spec-02-14.data new file mode 100644 index 0000000..59943de --- /dev/null +++ b/_test/data/spec-02-14.data @@ -0,0 +1,4 @@ +--- + Mark McGwire's + year was crippled + by a knee injury. 
diff --git a/_test/data/spec-02-14.structure b/_test/data/spec-02-14.structure new file mode 100644 index 0000000..0ca9514 --- /dev/null +++ b/_test/data/spec-02-14.structure @@ -0,0 +1 @@ +True diff --git a/_test/data/spec-02-14.tokens b/_test/data/spec-02-14.tokens new file mode 100644 index 0000000..7456c05 --- /dev/null +++ b/_test/data/spec-02-14.tokens @@ -0,0 +1 @@ +--- _ diff --git a/_test/data/spec-02-15.data b/_test/data/spec-02-15.data new file mode 100644 index 0000000..80b89a6 --- /dev/null +++ b/_test/data/spec-02-15.data @@ -0,0 +1,8 @@ +> + Sammy Sosa completed another + fine season with great stats. + + 63 Home Runs + 0.288 Batting Average + + What a year! diff --git a/_test/data/spec-02-15.structure b/_test/data/spec-02-15.structure new file mode 100644 index 0000000..0ca9514 --- /dev/null +++ b/_test/data/spec-02-15.structure @@ -0,0 +1 @@ +True diff --git a/_test/data/spec-02-15.tokens b/_test/data/spec-02-15.tokens new file mode 100644 index 0000000..31354ec --- /dev/null +++ b/_test/data/spec-02-15.tokens @@ -0,0 +1 @@ +_ diff --git a/_test/data/spec-02-16.data b/_test/data/spec-02-16.data new file mode 100644 index 0000000..9f66d88 --- /dev/null +++ b/_test/data/spec-02-16.data @@ -0,0 +1,7 @@ +name: Mark McGwire +accomplishment: > + Mark set a major league + home run record in 1998. +stats: | + 65 Home Runs + 0.278 Batting Average diff --git a/_test/data/spec-02-16.structure b/_test/data/spec-02-16.structure new file mode 100644 index 0000000..aba1ced --- /dev/null +++ b/_test/data/spec-02-16.structure @@ -0,0 +1 @@ +[(True, True), (True, True), (True, True)] diff --git a/_test/data/spec-02-16.tokens b/_test/data/spec-02-16.tokens new file mode 100644 index 0000000..e4e381b --- /dev/null +++ b/_test/data/spec-02-16.tokens @@ -0,0 +1,5 @@ +{{ +? _ : _ +? _ : _ +? 
_ : _ +]} diff --git a/_test/data/spec-02-17.data b/_test/data/spec-02-17.data new file mode 100644 index 0000000..b2870c5 --- /dev/null +++ b/_test/data/spec-02-17.data @@ -0,0 +1,7 @@ +unicode: "Sosa did fine.\u263A" +control: "\b1998\t1999\t2000\n" +hexesc: "\x13\x10 is \r\n" + +single: '"Howdy!" he cried.' +quoted: ' # not a ''comment''.' +tie-fighter: '|\-*-/|' diff --git a/_test/data/spec-02-17.structure b/_test/data/spec-02-17.structure new file mode 100644 index 0000000..933646d --- /dev/null +++ b/_test/data/spec-02-17.structure @@ -0,0 +1 @@ +[(True, True), (True, True), (True, True), (True, True), (True, True), (True, True)] diff --git a/_test/data/spec-02-17.tokens b/_test/data/spec-02-17.tokens new file mode 100644 index 0000000..db65540 --- /dev/null +++ b/_test/data/spec-02-17.tokens @@ -0,0 +1,8 @@ +{{ +? _ : _ +? _ : _ +? _ : _ +? _ : _ +? _ : _ +? _ : _ +]} diff --git a/_test/data/spec-02-18.data b/_test/data/spec-02-18.data new file mode 100644 index 0000000..e0a8bfa --- /dev/null +++ b/_test/data/spec-02-18.data @@ -0,0 +1,6 @@ +plain: + This unquoted scalar + spans many lines. + +quoted: "So does this + quoted scalar.\n" diff --git a/_test/data/spec-02-18.structure b/_test/data/spec-02-18.structure new file mode 100644 index 0000000..0ca4991 --- /dev/null +++ b/_test/data/spec-02-18.structure @@ -0,0 +1 @@ +[(True, True), (True, True)] diff --git a/_test/data/spec-02-18.tokens b/_test/data/spec-02-18.tokens new file mode 100644 index 0000000..83b31dc --- /dev/null +++ b/_test/data/spec-02-18.tokens @@ -0,0 +1,4 @@ +{{ +? _ : _ +? 
_ : _ +]} diff --git a/_test/data/spec-02-19.data b/_test/data/spec-02-19.data new file mode 100644 index 0000000..bf69de6 --- /dev/null +++ b/_test/data/spec-02-19.data @@ -0,0 +1,5 @@ +canonical: 12345 +decimal: +12,345 +sexagesimal: 3:25:45 +octal: 014 +hexadecimal: 0xC diff --git a/_test/data/spec-02-19.structure b/_test/data/spec-02-19.structure new file mode 100644 index 0000000..48ca99d --- /dev/null +++ b/_test/data/spec-02-19.structure @@ -0,0 +1 @@ +[(True, True), (True, True), (True, True), (True, True), (True, True)] diff --git a/_test/data/spec-02-19.tokens b/_test/data/spec-02-19.tokens new file mode 100644 index 0000000..5bda68f --- /dev/null +++ b/_test/data/spec-02-19.tokens @@ -0,0 +1,7 @@ +{{ +? _ : _ +? _ : _ +? _ : _ +? _ : _ +? _ : _ +]} diff --git a/_test/data/spec-02-20.data b/_test/data/spec-02-20.data new file mode 100644 index 0000000..1d4897f --- /dev/null +++ b/_test/data/spec-02-20.data @@ -0,0 +1,6 @@ +canonical: 1.23015e+3 +exponential: 12.3015e+02 +sexagesimal: 20:30.15 +fixed: 1,230.15 +negative infinity: -.inf +not a number: .NaN diff --git a/_test/data/spec-02-20.structure b/_test/data/spec-02-20.structure new file mode 100644 index 0000000..933646d --- /dev/null +++ b/_test/data/spec-02-20.structure @@ -0,0 +1 @@ +[(True, True), (True, True), (True, True), (True, True), (True, True), (True, True)] diff --git a/_test/data/spec-02-20.tokens b/_test/data/spec-02-20.tokens new file mode 100644 index 0000000..db65540 --- /dev/null +++ b/_test/data/spec-02-20.tokens @@ -0,0 +1,8 @@ +{{ +? _ : _ +? _ : _ +? _ : _ +? _ : _ +? _ : _ +? 
_ : _ +]} diff --git a/_test/data/spec-02-21.data b/_test/data/spec-02-21.data new file mode 100644 index 0000000..dec6a56 --- /dev/null +++ b/_test/data/spec-02-21.data @@ -0,0 +1,4 @@ +null: ~ +true: y +false: n +string: '12345' diff --git a/_test/data/spec-02-21.structure b/_test/data/spec-02-21.structure new file mode 100644 index 0000000..021635f --- /dev/null +++ b/_test/data/spec-02-21.structure @@ -0,0 +1 @@ +[(True, True), (True, True), (True, True), (True, True)] diff --git a/_test/data/spec-02-21.tokens b/_test/data/spec-02-21.tokens new file mode 100644 index 0000000..aeccbaf --- /dev/null +++ b/_test/data/spec-02-21.tokens @@ -0,0 +1,6 @@ +{{ +? _ : _ +? _ : _ +? _ : _ +? _ : _ +]} diff --git a/_test/data/spec-02-22.data b/_test/data/spec-02-22.data new file mode 100644 index 0000000..aaac185 --- /dev/null +++ b/_test/data/spec-02-22.data @@ -0,0 +1,4 @@ +canonical: 2001-12-15T02:59:43.1Z +iso8601: 2001-12-14t21:59:43.10-05:00 +spaced: 2001-12-14 21:59:43.10 -5 +date: 2002-12-14 diff --git a/_test/data/spec-02-22.structure b/_test/data/spec-02-22.structure new file mode 100644 index 0000000..021635f --- /dev/null +++ b/_test/data/spec-02-22.structure @@ -0,0 +1 @@ +[(True, True), (True, True), (True, True), (True, True)] diff --git a/_test/data/spec-02-22.tokens b/_test/data/spec-02-22.tokens new file mode 100644 index 0000000..aeccbaf --- /dev/null +++ b/_test/data/spec-02-22.tokens @@ -0,0 +1,6 @@ +{{ +? _ : _ +? _ : _ +? _ : _ +? _ : _ +]} diff --git a/_test/data/spec-02-23.data b/_test/data/spec-02-23.data new file mode 100644 index 0000000..5dbd992 --- /dev/null +++ b/_test/data/spec-02-23.data @@ -0,0 +1,13 @@ +--- +not-date: !!str 2002-04-28 + +picture: !!binary | + R0lGODlhDAAMAIQAAP//9/X + 17unp5WZmZgAAAOfn515eXv + Pz7Y6OjuDg4J+fn5OTk6enp + 56enmleECcgggoBADs= + +application specific tag: !something | + The semantics of the tag + above may be different for + different documents. 
diff --git a/_test/data/spec-02-23.structure b/_test/data/spec-02-23.structure new file mode 100644 index 0000000..aba1ced --- /dev/null +++ b/_test/data/spec-02-23.structure @@ -0,0 +1 @@ +[(True, True), (True, True), (True, True)] diff --git a/_test/data/spec-02-23.tokens b/_test/data/spec-02-23.tokens new file mode 100644 index 0000000..9ac54aa --- /dev/null +++ b/_test/data/spec-02-23.tokens @@ -0,0 +1,6 @@ +--- +{{ +? _ : ! _ +? _ : ! _ +? _ : ! _ +]} diff --git a/_test/data/spec-02-24.data b/_test/data/spec-02-24.data new file mode 100644 index 0000000..1180757 --- /dev/null +++ b/_test/data/spec-02-24.data @@ -0,0 +1,14 @@ +%TAG ! tag:clarkevans.com,2002: +--- !shape + # Use the ! handle for presenting + # tag:clarkevans.com,2002:circle +- !circle + center: &ORIGIN {x: 73, y: 129} + radius: 7 +- !line + start: *ORIGIN + finish: { x: 89, y: 102 } +- !label + start: *ORIGIN + color: 0xFFEEBB + text: Pretty vector drawing. diff --git a/_test/data/spec-02-24.structure b/_test/data/spec-02-24.structure new file mode 100644 index 0000000..a800729 --- /dev/null +++ b/_test/data/spec-02-24.structure @@ -0,0 +1,5 @@ +[ +[(True, [(True, True), (True, True)]), (True, True)], +[(True, '*'), (True, [(True, True), (True, True)])], +[(True, '*'), (True, True), (True, True)], +] diff --git a/_test/data/spec-02-24.tokens b/_test/data/spec-02-24.tokens new file mode 100644 index 0000000..039c385 --- /dev/null +++ b/_test/data/spec-02-24.tokens @@ -0,0 +1,20 @@ +% +--- ! +[[ +, ! + {{ + ? _ : & { ? _ : _ , ? _ : _ } + ? _ : _ + ]} +, ! + {{ + ? _ : * + ? _ : { ? _ : _ , ? _ : _ } + ]} +, ! + {{ + ? _ : * + ? _ : _ + ? _ : _ + ]} +]} diff --git a/_test/data/spec-02-25.data b/_test/data/spec-02-25.data new file mode 100644 index 0000000..769ac31 --- /dev/null +++ b/_test/data/spec-02-25.data @@ -0,0 +1,7 @@ +# sets are represented as a +# mapping where each key is +# associated with the empty string +--- !!set +? Mark McGwire +? Sammy Sosa +? 
Ken Griff diff --git a/_test/data/spec-02-25.structure b/_test/data/spec-02-25.structure new file mode 100644 index 0000000..0b40e61 --- /dev/null +++ b/_test/data/spec-02-25.structure @@ -0,0 +1 @@ +[(True, None), (True, None), (True, None)] diff --git a/_test/data/spec-02-25.tokens b/_test/data/spec-02-25.tokens new file mode 100644 index 0000000..b700236 --- /dev/null +++ b/_test/data/spec-02-25.tokens @@ -0,0 +1,6 @@ +--- ! +{{ +? _ +? _ +? _ +]} diff --git a/_test/data/spec-02-26.data b/_test/data/spec-02-26.data new file mode 100644 index 0000000..3143763 --- /dev/null +++ b/_test/data/spec-02-26.data @@ -0,0 +1,7 @@ +# ordered maps are represented as +# a sequence of mappings, with +# each mapping having one key +--- !!omap +- Mark McGwire: 65 +- Sammy Sosa: 63 +- Ken Griffy: 58 diff --git a/_test/data/spec-02-26.structure b/_test/data/spec-02-26.structure new file mode 100644 index 0000000..cf429b9 --- /dev/null +++ b/_test/data/spec-02-26.structure @@ -0,0 +1,5 @@ +[ +[(True, True)], +[(True, True)], +[(True, True)], +] diff --git a/_test/data/spec-02-26.tokens b/_test/data/spec-02-26.tokens new file mode 100644 index 0000000..7bee492 --- /dev/null +++ b/_test/data/spec-02-26.tokens @@ -0,0 +1,6 @@ +--- ! +[[ +, {{ ? _ : _ ]} +, {{ ? _ : _ ]} +, {{ ? _ : _ ]} +]} diff --git a/_test/data/spec-02-27.data b/_test/data/spec-02-27.data new file mode 100644 index 0000000..4625739 --- /dev/null +++ b/_test/data/spec-02-27.data @@ -0,0 +1,29 @@ +--- ! +invoice: 34843 +date : 2001-01-23 +bill-to: &id001 + given : Chris + family : Dumars + address: + lines: | + 458 Walkman Dr. + Suite #292 + city : Royal Oak + state : MI + postal : 48046 +ship-to: *id001 +product: + - sku : BL394D + quantity : 4 + description : Basketball + price : 450.00 + - sku : BL4438H + quantity : 1 + description : Super Hoop + price : 2392.00 +tax : 251.42 +total: 4443.52 +comments: + Late afternoon is best. + Backup contact is Nancy + Billsmer @ 338-4338. 
diff --git a/_test/data/spec-02-27.structure b/_test/data/spec-02-27.structure new file mode 100644 index 0000000..a2113b9 --- /dev/null +++ b/_test/data/spec-02-27.structure @@ -0,0 +1,17 @@ +[ +(True, True), +(True, True), +(True, [ + (True, True), + (True, True), + (True, [(True, True), (True, True), (True, True), (True, True)]), + ]), +(True, '*'), +(True, [ + [(True, True), (True, True), (True, True), (True, True)], + [(True, True), (True, True), (True, True), (True, True)], + ]), +(True, True), +(True, True), +(True, True), +] diff --git a/_test/data/spec-02-27.tokens b/_test/data/spec-02-27.tokens new file mode 100644 index 0000000..2dc1c25 --- /dev/null +++ b/_test/data/spec-02-27.tokens @@ -0,0 +1,20 @@ +--- ! +{{ +? _ : _ +? _ : _ +? _ : & + {{ + ? _ : _ + ? _ : _ + ? _ : {{ ? _ : _ ? _ : _ ? _ : _ ? _ : _ ]} + ]} +? _ : * +? _ : + [[ + , {{ ? _ : _ ? _ : _ ? _ : _ ? _ : _ ]} + , {{ ? _ : _ ? _ : _ ? _ : _ ? _ : _ ]} + ]} +? _ : _ +? _ : _ +? _ : _ +]} diff --git a/_test/data/spec-02-28.data b/_test/data/spec-02-28.data new file mode 100644 index 0000000..a5c8dc8 --- /dev/null +++ b/_test/data/spec-02-28.data @@ -0,0 +1,26 @@ +--- +Time: 2001-11-23 15:01:42 -5 +User: ed +Warning: + This is an error message + for the log file +--- +Time: 2001-11-23 15:02:31 -5 +User: ed +Warning: + A slightly different error + message. 
+--- +Date: 2001-11-23 15:03:17 -5 +User: ed +Fatal: + Unknown variable "bar" +Stack: + - file: TopClass.py + line: 23 + code: | + x = MoreObject("345\n") + - file: MoreClass.py + line: 58 + code: |- + foo = bar diff --git a/_test/data/spec-02-28.structure b/_test/data/spec-02-28.structure new file mode 100644 index 0000000..8ec0b56 --- /dev/null +++ b/_test/data/spec-02-28.structure @@ -0,0 +1,10 @@ +[ +[(True, True), (True, True), (True, True)], +[(True, True), (True, True), (True, True)], +[(True, True), (True, True), (True, True), +(True, [ + [(True, True), (True, True), (True, True)], + [(True, True), (True, True), (True, True)], + ]), +] +] diff --git a/_test/data/spec-02-28.tokens b/_test/data/spec-02-28.tokens new file mode 100644 index 0000000..8d5e1bc --- /dev/null +++ b/_test/data/spec-02-28.tokens @@ -0,0 +1,23 @@ +--- +{{ +? _ : _ +? _ : _ +? _ : _ +]} +--- +{{ +? _ : _ +? _ : _ +? _ : _ +]} +--- +{{ +? _ : _ +? _ : _ +? _ : _ +? _ : + [[ + , {{ ? _ : _ ? _ : _ ? _ : _ ]} + , {{ ? _ : _ ? _ : _ ? _ : _ ]} + ]} +]} diff --git a/_test/data/spec-05-01-utf16be.data b/_test/data/spec-05-01-utf16be.data new file mode 100644 index 0000000..3525062 Binary files /dev/null and b/_test/data/spec-05-01-utf16be.data differ diff --git a/_test/data/spec-05-01-utf16be.empty b/_test/data/spec-05-01-utf16be.empty new file mode 100644 index 0000000..bfffa8b --- /dev/null +++ b/_test/data/spec-05-01-utf16be.empty @@ -0,0 +1,2 @@ +# This stream contains no +# documents, only comments. diff --git a/_test/data/spec-05-01-utf16le.data b/_test/data/spec-05-01-utf16le.data new file mode 100644 index 0000000..0823f74 Binary files /dev/null and b/_test/data/spec-05-01-utf16le.data differ diff --git a/_test/data/spec-05-01-utf16le.empty b/_test/data/spec-05-01-utf16le.empty new file mode 100644 index 0000000..bfffa8b --- /dev/null +++ b/_test/data/spec-05-01-utf16le.empty @@ -0,0 +1,2 @@ +# This stream contains no +# documents, only comments. 
diff --git a/_test/data/spec-05-01-utf8.data b/_test/data/spec-05-01-utf8.data new file mode 100644 index 0000000..780d25b --- /dev/null +++ b/_test/data/spec-05-01-utf8.data @@ -0,0 +1 @@ +# Comment only. diff --git a/_test/data/spec-05-01-utf8.empty b/_test/data/spec-05-01-utf8.empty new file mode 100644 index 0000000..bfffa8b --- /dev/null +++ b/_test/data/spec-05-01-utf8.empty @@ -0,0 +1,2 @@ +# This stream contains no +# documents, only comments. diff --git a/_test/data/spec-05-02-utf16be.data b/_test/data/spec-05-02-utf16be.data new file mode 100644 index 0000000..5ebbb04 Binary files /dev/null and b/_test/data/spec-05-02-utf16be.data differ diff --git a/_test/data/spec-05-02-utf16be.error b/_test/data/spec-05-02-utf16be.error new file mode 100644 index 0000000..1df3616 --- /dev/null +++ b/_test/data/spec-05-02-utf16be.error @@ -0,0 +1,3 @@ +ERROR: + A BOM must not appear + inside a document. diff --git a/_test/data/spec-05-02-utf16le.data b/_test/data/spec-05-02-utf16le.data new file mode 100644 index 0000000..0cd90a2 Binary files /dev/null and b/_test/data/spec-05-02-utf16le.data differ diff --git a/_test/data/spec-05-02-utf16le.error b/_test/data/spec-05-02-utf16le.error new file mode 100644 index 0000000..1df3616 --- /dev/null +++ b/_test/data/spec-05-02-utf16le.error @@ -0,0 +1,3 @@ +ERROR: + A BOM must not appear + inside a document. diff --git a/_test/data/spec-05-02-utf8.data b/_test/data/spec-05-02-utf8.data new file mode 100644 index 0000000..fb74866 --- /dev/null +++ b/_test/data/spec-05-02-utf8.data @@ -0,0 +1,3 @@ +# Invalid use of BOM +# inside a +# document. diff --git a/_test/data/spec-05-02-utf8.error b/_test/data/spec-05-02-utf8.error new file mode 100644 index 0000000..1df3616 --- /dev/null +++ b/_test/data/spec-05-02-utf8.error @@ -0,0 +1,3 @@ +ERROR: + A BOM must not appear + inside a document. 
diff --git a/_test/data/spec-05-03.canonical b/_test/data/spec-05-03.canonical new file mode 100644 index 0000000..a143a73 --- /dev/null +++ b/_test/data/spec-05-03.canonical @@ -0,0 +1,14 @@ +%YAML 1.1 +--- +!!map { + ? !!str "sequence" + : !!seq [ + !!str "one", !!str "two" + ], + ? !!str "mapping" + : !!map { + ? !!str "sky" : !!str "blue", +# ? !!str "sea" : !!str "green", + ? !!map { ? !!str "sea" : !!str "green" } : !!null "", + } +} diff --git a/_test/data/spec-05-03.data b/_test/data/spec-05-03.data new file mode 100644 index 0000000..4661f33 --- /dev/null +++ b/_test/data/spec-05-03.data @@ -0,0 +1,7 @@ +sequence: +- one +- two +mapping: + ? sky + : blue + ? sea : green diff --git a/_test/data/spec-05-04.canonical b/_test/data/spec-05-04.canonical new file mode 100644 index 0000000..00c9723 --- /dev/null +++ b/_test/data/spec-05-04.canonical @@ -0,0 +1,13 @@ +%YAML 1.1 +--- +!!map { + ? !!str "sequence" + : !!seq [ + !!str "one", !!str "two" + ], + ? !!str "mapping" + : !!map { + ? !!str "sky" : !!str "blue", + ? !!str "sea" : !!str "green", + } +} diff --git a/_test/data/spec-05-04.data b/_test/data/spec-05-04.data new file mode 100644 index 0000000..df33847 --- /dev/null +++ b/_test/data/spec-05-04.data @@ -0,0 +1,2 @@ +sequence: [ one, two, ] +mapping: { sky: blue, sea: green } diff --git a/_test/data/spec-05-05.data b/_test/data/spec-05-05.data new file mode 100644 index 0000000..62524c0 --- /dev/null +++ b/_test/data/spec-05-05.data @@ -0,0 +1 @@ +# Comment only. diff --git a/_test/data/spec-05-05.empty b/_test/data/spec-05-05.empty new file mode 100644 index 0000000..bfffa8b --- /dev/null +++ b/_test/data/spec-05-05.empty @@ -0,0 +1,2 @@ +# This stream contains no +# documents, only comments. diff --git a/_test/data/spec-05-06.canonical b/_test/data/spec-05-06.canonical new file mode 100644 index 0000000..4f30c11 --- /dev/null +++ b/_test/data/spec-05-06.canonical @@ -0,0 +1,8 @@ +%YAML 1.1 +--- +!!map { + ? 
!!str "anchored" + : &A1 !local "value", + ? !!str "alias" + : *A1, +} diff --git a/_test/data/spec-05-06.data b/_test/data/spec-05-06.data new file mode 100644 index 0000000..7a1f9b3 --- /dev/null +++ b/_test/data/spec-05-06.data @@ -0,0 +1,2 @@ +anchored: !local &anchor value +alias: *anchor diff --git a/_test/data/spec-05-07.canonical b/_test/data/spec-05-07.canonical new file mode 100644 index 0000000..dc3732a --- /dev/null +++ b/_test/data/spec-05-07.canonical @@ -0,0 +1,8 @@ +%YAML 1.1 +--- +!!map { + ? !!str "literal" + : !!str "text\n", + ? !!str "folded" + : !!str "text\n", +} diff --git a/_test/data/spec-05-07.data b/_test/data/spec-05-07.data new file mode 100644 index 0000000..97eb3a3 --- /dev/null +++ b/_test/data/spec-05-07.data @@ -0,0 +1,4 @@ +literal: | + text +folded: > + text diff --git a/_test/data/spec-05-08.canonical b/_test/data/spec-05-08.canonical new file mode 100644 index 0000000..610bd68 --- /dev/null +++ b/_test/data/spec-05-08.canonical @@ -0,0 +1,8 @@ +%YAML 1.1 +--- +!!map { + ? !!str "single" + : !!str "text", + ? 
!!str "double" + : !!str "text", +} diff --git a/_test/data/spec-05-08.data b/_test/data/spec-05-08.data new file mode 100644 index 0000000..04ebf69 --- /dev/null +++ b/_test/data/spec-05-08.data @@ -0,0 +1,2 @@ +single: 'text' +double: "text" diff --git a/_test/data/spec-05-09.canonical b/_test/data/spec-05-09.canonical new file mode 100644 index 0000000..597e3de --- /dev/null +++ b/_test/data/spec-05-09.canonical @@ -0,0 +1,3 @@ +%YAML 1.1 +--- +!!str "text" diff --git a/_test/data/spec-05-09.data b/_test/data/spec-05-09.data new file mode 100644 index 0000000..a43431b --- /dev/null +++ b/_test/data/spec-05-09.data @@ -0,0 +1,2 @@ +%YAML 1.1 +--- text diff --git a/_test/data/spec-05-10.data b/_test/data/spec-05-10.data new file mode 100644 index 0000000..a4caf91 --- /dev/null +++ b/_test/data/spec-05-10.data @@ -0,0 +1,2 @@ +commercial-at: @text +grave-accent: `text diff --git a/_test/data/spec-05-10.error b/_test/data/spec-05-10.error new file mode 100644 index 0000000..46f776e --- /dev/null +++ b/_test/data/spec-05-10.error @@ -0,0 +1,3 @@ +ERROR: + Reserved indicators can't + start a plain scalar. diff --git a/_test/data/spec-05-11.canonical b/_test/data/spec-05-11.canonical new file mode 100644 index 0000000..fc25bef --- /dev/null +++ b/_test/data/spec-05-11.canonical @@ -0,0 +1,6 @@ +%YAML 1.1 +--- !!str +"Generic line break (no glyph)\n\ + Generic line break (glyphed)\n\ + Line separator\u2028\ + Paragraph separator\u2029" diff --git a/_test/data/spec-05-11.data b/_test/data/spec-05-11.data new file mode 100644 index 0000000..b448b75 --- /dev/null +++ b/_test/data/spec-05-11.data @@ -0,0 +1,3 @@ +| + Generic line break (no glyph) + Generic line break (glyphed)Â… Line separator
 Paragraph separator
 \ No newline at end of file diff --git a/_test/data/spec-05-12.data b/_test/data/spec-05-12.data new file mode 100644 index 0000000..7c3ad7f --- /dev/null +++ b/_test/data/spec-05-12.data @@ -0,0 +1,9 @@ +# Tabs do's and don'ts: +# comment: +quoted: "Quoted " +block: | + void main() { + printf("Hello, world!\n"); + } +elsewhere: # separation + indentation, in plain scalar diff --git a/_test/data/spec-05-12.error b/_test/data/spec-05-12.error new file mode 100644 index 0000000..8aad4c8 --- /dev/null +++ b/_test/data/spec-05-12.error @@ -0,0 +1,8 @@ +ERROR: + Tabs may appear inside + comments and quoted or + block scalar content. + Tabs must not appear + elsewhere, such as + in indentation and + separation spaces. diff --git a/_test/data/spec-05-13.canonical b/_test/data/spec-05-13.canonical new file mode 100644 index 0000000..90c1c5c --- /dev/null +++ b/_test/data/spec-05-13.canonical @@ -0,0 +1,5 @@ +%YAML 1.1 +--- !!str +"Text containing \ + both space and \ + tab characters" diff --git a/_test/data/spec-05-13.data b/_test/data/spec-05-13.data new file mode 100644 index 0000000..fce7951 --- /dev/null +++ b/_test/data/spec-05-13.data @@ -0,0 +1,3 @@ + "Text containing + both space and + tab characters" diff --git a/_test/data/spec-05-14.canonical b/_test/data/spec-05-14.canonical new file mode 100644 index 0000000..4bff01c --- /dev/null +++ b/_test/data/spec-05-14.canonical @@ -0,0 +1,7 @@ +%YAML 1.1 +--- +"Fun with \x5C + \x22 \x07 \x08 \x1B \x0C + \x0A \x0D \x09 \x0B \x00 + \x20 \xA0 \x85 \u2028 \u2029 + A A A" diff --git a/_test/data/spec-05-14.data b/_test/data/spec-05-14.data new file mode 100644 index 0000000..d6e8ce4 --- /dev/null +++ b/_test/data/spec-05-14.data @@ -0,0 +1,2 @@ +"Fun with \\ + \" \a \b \e \f \Â… \n \r \t \v \0 \
 \ \_ \N \L \P \
 \x41 \u0041 \U00000041" diff --git a/_test/data/spec-05-15.data b/_test/data/spec-05-15.data new file mode 100644 index 0000000..7bf12b6 --- /dev/null +++ b/_test/data/spec-05-15.data @@ -0,0 +1,3 @@ +Bad escapes: + "\c + \xq-" diff --git a/_test/data/spec-05-15.error b/_test/data/spec-05-15.error new file mode 100644 index 0000000..71ffbd9 --- /dev/null +++ b/_test/data/spec-05-15.error @@ -0,0 +1,3 @@ +ERROR: +- c is an invalid escaped character. +- q and - are invalid hex digits. diff --git a/_test/data/spec-06-01.canonical b/_test/data/spec-06-01.canonical new file mode 100644 index 0000000..f17ec92 --- /dev/null +++ b/_test/data/spec-06-01.canonical @@ -0,0 +1,15 @@ +%YAML 1.1 +--- +!!map { + ? !!str "Not indented" + : !!map { + ? !!str "By one space" + : !!str "By four\n spaces\n", + ? !!str "Flow style" + : !!seq [ + !!str "By two", + !!str "Also by two", + !!str "Still by two", + ] + } +} diff --git a/_test/data/spec-06-01.data b/_test/data/spec-06-01.data new file mode 100644 index 0000000..6134ba1 --- /dev/null +++ b/_test/data/spec-06-01.data @@ -0,0 +1,14 @@ + # Leading comment line spaces are + # neither content nor indentation. + +Not indented: + By one space: | + By four + spaces + Flow style: [ # Leading spaces + By two, # in flow style + Also by two, # are neither +# Tabs are not allowed: +# Still by two # content nor + Still by two # content nor + ] # indentation. diff --git a/_test/data/spec-06-02.data b/_test/data/spec-06-02.data new file mode 100644 index 0000000..ff741e5 --- /dev/null +++ b/_test/data/spec-06-02.data @@ -0,0 +1,3 @@ + # Comment + + diff --git a/_test/data/spec-06-02.empty b/_test/data/spec-06-02.empty new file mode 100644 index 0000000..bfffa8b --- /dev/null +++ b/_test/data/spec-06-02.empty @@ -0,0 +1,2 @@ +# This stream contains no +# documents, only comments. 
diff --git a/_test/data/spec-06-03.canonical b/_test/data/spec-06-03.canonical new file mode 100644 index 0000000..ec26902 --- /dev/null +++ b/_test/data/spec-06-03.canonical @@ -0,0 +1,6 @@ +%YAML 1.1 +--- +!!map { + ? !!str "key" + : !!str "value" +} diff --git a/_test/data/spec-06-03.data b/_test/data/spec-06-03.data new file mode 100644 index 0000000..9db0912 --- /dev/null +++ b/_test/data/spec-06-03.data @@ -0,0 +1,2 @@ +key: # Comment + value diff --git a/_test/data/spec-06-04.canonical b/_test/data/spec-06-04.canonical new file mode 100644 index 0000000..ec26902 --- /dev/null +++ b/_test/data/spec-06-04.canonical @@ -0,0 +1,6 @@ +%YAML 1.1 +--- +!!map { + ? !!str "key" + : !!str "value" +} diff --git a/_test/data/spec-06-04.data b/_test/data/spec-06-04.data new file mode 100644 index 0000000..86308dd --- /dev/null +++ b/_test/data/spec-06-04.data @@ -0,0 +1,4 @@ +key: # Comment + # lines + value + diff --git a/_test/data/spec-06-05.canonical b/_test/data/spec-06-05.canonical new file mode 100644 index 0000000..8da431d --- /dev/null +++ b/_test/data/spec-06-05.canonical @@ -0,0 +1,16 @@ +%YAML 1.1 +--- +!!map { + ? !!map { + ? !!str "first" + : !!str "Sammy", + ? !!str "last" + : !!str "Sosa" + } + : !!map { + ? !!str "hr" + : !!int "65", + ? !!str "avg" + : !!float "0.278" + } +} diff --git a/_test/data/spec-06-05.data b/_test/data/spec-06-05.data new file mode 100644 index 0000000..37613f5 --- /dev/null +++ b/_test/data/spec-06-05.data @@ -0,0 +1,6 @@ +{ first: Sammy, last: Sosa }: +# Statistics: + hr: # Home runs + 65 + avg: # Average + 0.278 diff --git a/_test/data/spec-06-06.canonical b/_test/data/spec-06-06.canonical new file mode 100644 index 0000000..513d07a --- /dev/null +++ b/_test/data/spec-06-06.canonical @@ -0,0 +1,10 @@ +%YAML 1.1 +--- +!!map { + ? !!str "plain" + : !!str "text lines", + ? !!str "quoted" + : !!str "text lines", + ? 
!!str "block" + : !!str "text\n lines\n" +} diff --git a/_test/data/spec-06-06.data b/_test/data/spec-06-06.data new file mode 100644 index 0000000..2f62d08 --- /dev/null +++ b/_test/data/spec-06-06.data @@ -0,0 +1,7 @@ +plain: text + lines +quoted: "text + lines" +block: | + text + lines diff --git a/_test/data/spec-06-07.canonical b/_test/data/spec-06-07.canonical new file mode 100644 index 0000000..11357e4 --- /dev/null +++ b/_test/data/spec-06-07.canonical @@ -0,0 +1,6 @@ +%YAML 1.1 +--- +!!seq [ + !!str "foo\nbar", + !!str "foo\n\nbar" +] diff --git a/_test/data/spec-06-07.data b/_test/data/spec-06-07.data new file mode 100644 index 0000000..130cfa7 --- /dev/null +++ b/_test/data/spec-06-07.data @@ -0,0 +1,8 @@ +- foo + + bar +- |- + foo + + bar + diff --git a/_test/data/spec-06-08.canonical b/_test/data/spec-06-08.canonical new file mode 100644 index 0000000..cc72bc8 --- /dev/null +++ b/_test/data/spec-06-08.canonical @@ -0,0 +1,5 @@ +%YAML 1.1 +--- !!str +"specific\L\ + trimmed\n\n\n\ + as space" diff --git a/_test/data/spec-06-08.data b/_test/data/spec-06-08.data new file mode 100644 index 0000000..f2896ed --- /dev/null +++ b/_test/data/spec-06-08.data @@ -0,0 +1,2 @@ +>- + specific
 trimmedÂ… Â… Â…Â… asÂ… space diff --git a/_test/data/spec-07-01.canonical b/_test/data/spec-07-01.canonical new file mode 100644 index 0000000..8c8c48d --- /dev/null +++ b/_test/data/spec-07-01.canonical @@ -0,0 +1,3 @@ +%YAML 1.1 +--- !!str +"foo" diff --git a/_test/data/spec-07-01.data b/_test/data/spec-07-01.data new file mode 100644 index 0000000..2113eb6 --- /dev/null +++ b/_test/data/spec-07-01.data @@ -0,0 +1,3 @@ +%FOO bar baz # Should be ignored + # with a warning. +--- "foo" diff --git a/_test/data/spec-07-01.skip-ext b/_test/data/spec-07-01.skip-ext new file mode 100644 index 0000000..e69de29 diff --git a/_test/data/spec-07-02.canonical b/_test/data/spec-07-02.canonical new file mode 100644 index 0000000..cb7dd1c --- /dev/null +++ b/_test/data/spec-07-02.canonical @@ -0,0 +1,3 @@ +%YAML 1.1 +--- +!!str "foo" diff --git a/_test/data/spec-07-02.data b/_test/data/spec-07-02.data new file mode 100644 index 0000000..c8b7322 --- /dev/null +++ b/_test/data/spec-07-02.data @@ -0,0 +1,4 @@ +%YAML 1.2 # Attempt parsing + # with a warning +--- +"foo" diff --git a/_test/data/spec-07-02.skip-ext b/_test/data/spec-07-02.skip-ext new file mode 100644 index 0000000..e69de29 diff --git a/_test/data/spec-07-03.data b/_test/data/spec-07-03.data new file mode 100644 index 0000000..4bfa07a --- /dev/null +++ b/_test/data/spec-07-03.data @@ -0,0 +1,3 @@ +%YAML 1.1 +%YAML 1.1 +foo diff --git a/_test/data/spec-07-03.error b/_test/data/spec-07-03.error new file mode 100644 index 0000000..b0ac446 --- /dev/null +++ b/_test/data/spec-07-03.error @@ -0,0 +1,3 @@ +ERROR: +The YAML directive must only be +given at most once per document. 
diff --git a/_test/data/spec-07-04.canonical b/_test/data/spec-07-04.canonical new file mode 100644 index 0000000..cb7dd1c --- /dev/null +++ b/_test/data/spec-07-04.canonical @@ -0,0 +1,3 @@ +%YAML 1.1 +--- +!!str "foo" diff --git a/_test/data/spec-07-04.data b/_test/data/spec-07-04.data new file mode 100644 index 0000000..50f5ab9 --- /dev/null +++ b/_test/data/spec-07-04.data @@ -0,0 +1,3 @@ +%TAG !yaml! tag:yaml.org,2002: +--- +!yaml!str "foo" diff --git a/_test/data/spec-07-05.data b/_test/data/spec-07-05.data new file mode 100644 index 0000000..7276eae --- /dev/null +++ b/_test/data/spec-07-05.data @@ -0,0 +1,3 @@ +%TAG ! !foo +%TAG ! !foo +bar diff --git a/_test/data/spec-07-05.error b/_test/data/spec-07-05.error new file mode 100644 index 0000000..5601b19 --- /dev/null +++ b/_test/data/spec-07-05.error @@ -0,0 +1,4 @@ +ERROR: +The TAG directive must only +be given at most once per +handle in the same document. diff --git a/_test/data/spec-07-06.canonical b/_test/data/spec-07-06.canonical new file mode 100644 index 0000000..bddf616 --- /dev/null +++ b/_test/data/spec-07-06.canonical @@ -0,0 +1,6 @@ +%YAML 1.1 +--- +!!seq [ + ! "baz", + ! "string" +] diff --git a/_test/data/spec-07-06.data b/_test/data/spec-07-06.data new file mode 100644 index 0000000..d9854cb --- /dev/null +++ b/_test/data/spec-07-06.data @@ -0,0 +1,5 @@ +%TAG ! !foo +%TAG !yaml! tag:yaml.org,2002: +--- +- !bar "baz" +- !yaml!str "string" diff --git a/_test/data/spec-07-07a.canonical b/_test/data/spec-07-07a.canonical new file mode 100644 index 0000000..fa086df --- /dev/null +++ b/_test/data/spec-07-07a.canonical @@ -0,0 +1,3 @@ +%YAML 1.1 +--- +! 
"bar" diff --git a/_test/data/spec-07-07a.data b/_test/data/spec-07-07a.data new file mode 100644 index 0000000..9d42ec3 --- /dev/null +++ b/_test/data/spec-07-07a.data @@ -0,0 +1,2 @@ +# Private application: +!foo "bar" diff --git a/_test/data/spec-07-07b.canonical b/_test/data/spec-07-07b.canonical new file mode 100644 index 0000000..fe917d8 --- /dev/null +++ b/_test/data/spec-07-07b.canonical @@ -0,0 +1,3 @@ +%YAML 1.1 +--- +! "bar" diff --git a/_test/data/spec-07-07b.data b/_test/data/spec-07-07b.data new file mode 100644 index 0000000..2d36d0e --- /dev/null +++ b/_test/data/spec-07-07b.data @@ -0,0 +1,4 @@ +# Migrated to global: +%TAG ! tag:ben-kiki.org,2000:app/ +--- +!foo "bar" diff --git a/_test/data/spec-07-08.canonical b/_test/data/spec-07-08.canonical new file mode 100644 index 0000000..703aa7b --- /dev/null +++ b/_test/data/spec-07-08.canonical @@ -0,0 +1,7 @@ +%YAML 1.1 +--- +!!seq [ + ! "bar", + ! "string", + ! "baz" +] diff --git a/_test/data/spec-07-08.data b/_test/data/spec-07-08.data new file mode 100644 index 0000000..e2c6d9e --- /dev/null +++ b/_test/data/spec-07-08.data @@ -0,0 +1,9 @@ +# Explicitly specify default settings: +%TAG ! ! +%TAG !! tag:yaml.org,2002: +# Named handles have no default: +%TAG !o! tag:ben-kiki.org,2000: +--- +- !foo "bar" +- !!str "string" +- !o!type "baz" diff --git a/_test/data/spec-07-09.canonical b/_test/data/spec-07-09.canonical new file mode 100644 index 0000000..32d9e94 --- /dev/null +++ b/_test/data/spec-07-09.canonical @@ -0,0 +1,9 @@ +%YAML 1.1 +--- +!!str "foo" +%YAML 1.1 +--- +!!str "bar" +%YAML 1.1 +--- +!!str "baz" diff --git a/_test/data/spec-07-09.data b/_test/data/spec-07-09.data new file mode 100644 index 0000000..1209d47 --- /dev/null +++ b/_test/data/spec-07-09.data @@ -0,0 +1,11 @@ +--- +foo +... +# Repeated end marker. +... +--- +bar +# No end marker. +--- +baz +... 
diff --git a/_test/data/spec-07-10.canonical b/_test/data/spec-07-10.canonical new file mode 100644 index 0000000..1db650a --- /dev/null +++ b/_test/data/spec-07-10.canonical @@ -0,0 +1,15 @@ +%YAML 1.1 +--- +!!str "Root flow scalar" +%YAML 1.1 +--- +!!str "Root block scalar\n" +%YAML 1.1 +--- +!!map { + ? !!str "foo" + : !!str "bar" +} +--- +#!!str "" +!!null "" diff --git a/_test/data/spec-07-10.data b/_test/data/spec-07-10.data new file mode 100644 index 0000000..6939b39 --- /dev/null +++ b/_test/data/spec-07-10.data @@ -0,0 +1,11 @@ +"Root flow + scalar" +--- !!str > + Root block + scalar +--- +# Root collection: +foo : bar +... # Is optional. +--- +# Explicit document may be empty. diff --git a/_test/data/spec-07-11.data b/_test/data/spec-07-11.data new file mode 100644 index 0000000..d11302d --- /dev/null +++ b/_test/data/spec-07-11.data @@ -0,0 +1,2 @@ +# A stream may contain +# no documents. diff --git a/_test/data/spec-07-11.empty b/_test/data/spec-07-11.empty new file mode 100644 index 0000000..bfffa8b --- /dev/null +++ b/_test/data/spec-07-11.empty @@ -0,0 +1,2 @@ +# This stream contains no +# documents, only comments. diff --git a/_test/data/spec-07-12a.canonical b/_test/data/spec-07-12a.canonical new file mode 100644 index 0000000..efc116f --- /dev/null +++ b/_test/data/spec-07-12a.canonical @@ -0,0 +1,6 @@ +%YAML 1.1 +--- +!!map { + ? !!str "foo" + : !!str "bar" +} diff --git a/_test/data/spec-07-12a.data b/_test/data/spec-07-12a.data new file mode 100644 index 0000000..3807d57 --- /dev/null +++ b/_test/data/spec-07-12a.data @@ -0,0 +1,3 @@ +# Implicit document. Root +# collection (mapping) node. 
+foo : bar diff --git a/_test/data/spec-07-12b.canonical b/_test/data/spec-07-12b.canonical new file mode 100644 index 0000000..04bcffc --- /dev/null +++ b/_test/data/spec-07-12b.canonical @@ -0,0 +1,3 @@ +%YAML 1.1 +--- +!!str "Text content\n" diff --git a/_test/data/spec-07-12b.data b/_test/data/spec-07-12b.data new file mode 100644 index 0000000..43250db --- /dev/null +++ b/_test/data/spec-07-12b.data @@ -0,0 +1,4 @@ +# Explicit document. Root +# scalar (literal) node. +--- | + Text content diff --git a/_test/data/spec-07-13.canonical b/_test/data/spec-07-13.canonical new file mode 100644 index 0000000..5af71e9 --- /dev/null +++ b/_test/data/spec-07-13.canonical @@ -0,0 +1,9 @@ +%YAML 1.1 +--- +!!str "First document" +--- +! "No directives" +--- +! "With directives" +--- +! "Reset settings" diff --git a/_test/data/spec-07-13.data b/_test/data/spec-07-13.data new file mode 100644 index 0000000..ba7ec63 --- /dev/null +++ b/_test/data/spec-07-13.data @@ -0,0 +1,9 @@ +! "First document" +--- +!foo "No directives" +%TAG ! !foo +--- +!bar "With directives" +%YAML 1.1 +--- +!baz "Reset settings" diff --git a/_test/data/spec-08-01.canonical b/_test/data/spec-08-01.canonical new file mode 100644 index 0000000..69e4161 --- /dev/null +++ b/_test/data/spec-08-01.canonical @@ -0,0 +1,8 @@ +%YAML 1.1 +--- +!!map { + ? &A1 !!str "foo" + : !!str "bar", + ? &A2 !!str "baz" + : *A1 +} diff --git a/_test/data/spec-08-01.data b/_test/data/spec-08-01.data new file mode 100644 index 0000000..48986ec --- /dev/null +++ b/_test/data/spec-08-01.data @@ -0,0 +1,2 @@ +!!str &a1 "foo" : !!str bar +&a2 baz : *a1 diff --git a/_test/data/spec-08-02.canonical b/_test/data/spec-08-02.canonical new file mode 100644 index 0000000..dd6f76e --- /dev/null +++ b/_test/data/spec-08-02.canonical @@ -0,0 +1,8 @@ +%YAML 1.1 +--- +!!map { + ? !!str "First occurrence" + : &A !!str "Value", + ? 
!!str "Second occurrence" + : *A +} diff --git a/_test/data/spec-08-02.data b/_test/data/spec-08-02.data new file mode 100644 index 0000000..600d179 --- /dev/null +++ b/_test/data/spec-08-02.data @@ -0,0 +1,2 @@ +First occurrence: &anchor Value +Second occurrence: *anchor diff --git a/_test/data/spec-08-03.canonical b/_test/data/spec-08-03.canonical new file mode 100644 index 0000000..be7ea8f --- /dev/null +++ b/_test/data/spec-08-03.canonical @@ -0,0 +1,6 @@ +%YAML 1.1 +--- +!!map { + ? ! "foo" + : ! "baz" +} diff --git a/_test/data/spec-08-03.data b/_test/data/spec-08-03.data new file mode 100644 index 0000000..8e51f52 --- /dev/null +++ b/_test/data/spec-08-03.data @@ -0,0 +1,2 @@ +! foo : + ! baz diff --git a/_test/data/spec-08-04.data b/_test/data/spec-08-04.data new file mode 100644 index 0000000..f7d1b01 --- /dev/null +++ b/_test/data/spec-08-04.data @@ -0,0 +1,2 @@ +- ! foo +- !<$:?> bar diff --git a/_test/data/spec-08-04.error b/_test/data/spec-08-04.error new file mode 100644 index 0000000..6066375 --- /dev/null +++ b/_test/data/spec-08-04.error @@ -0,0 +1,6 @@ +ERROR: +- Verbatim tags aren't resolved, + so ! is invalid. +- The $:? tag is neither a global + URI tag nor a local tag starting + with “!â€. diff --git a/_test/data/spec-08-05.canonical b/_test/data/spec-08-05.canonical new file mode 100644 index 0000000..a5c710a --- /dev/null +++ b/_test/data/spec-08-05.canonical @@ -0,0 +1,7 @@ +%YAML 1.1 +--- +!!seq [ + ! "foo", + ! "bar", + ! "baz", +] diff --git a/_test/data/spec-08-05.data b/_test/data/spec-08-05.data new file mode 100644 index 0000000..93576ed --- /dev/null +++ b/_test/data/spec-08-05.data @@ -0,0 +1,5 @@ +%TAG !o! tag:ben-kiki.org,2000: +--- +- !local foo +- !!str bar +- !o!type baz diff --git a/_test/data/spec-08-06.data b/_test/data/spec-08-06.data new file mode 100644 index 0000000..8580010 --- /dev/null +++ b/_test/data/spec-08-06.data @@ -0,0 +1,5 @@ +%TAG !o! tag:ben-kiki.org,2000: +--- +- !$a!b foo +- !o! 
bar +- !h!type baz diff --git a/_test/data/spec-08-06.error b/_test/data/spec-08-06.error new file mode 100644 index 0000000..fb76f42 --- /dev/null +++ b/_test/data/spec-08-06.error @@ -0,0 +1,4 @@ +ERROR: +- The !$a! looks like a handle. +- The !o! handle has no suffix. +- The !h! handle wasn't declared. diff --git a/_test/data/spec-08-07.canonical b/_test/data/spec-08-07.canonical new file mode 100644 index 0000000..e2f43d9 --- /dev/null +++ b/_test/data/spec-08-07.canonical @@ -0,0 +1,8 @@ +%YAML 1.1 +--- +!!seq [ + ! "12", + ! "12", +# ! "12", + ! "12", +] diff --git a/_test/data/spec-08-07.data b/_test/data/spec-08-07.data new file mode 100644 index 0000000..98aa565 --- /dev/null +++ b/_test/data/spec-08-07.data @@ -0,0 +1,4 @@ +# Assuming conventional resolution: +- "12" +- 12 +- ! 12 diff --git a/_test/data/spec-08-08.canonical b/_test/data/spec-08-08.canonical new file mode 100644 index 0000000..d3f8b1a --- /dev/null +++ b/_test/data/spec-08-08.canonical @@ -0,0 +1,15 @@ +%YAML 1.1 +--- +!!map { + ? !!str "foo" + : !!str "bar baz" +} +%YAML 1.1 +--- +!!str "foo bar" +%YAML 1.1 +--- +!!str "foo bar" +%YAML 1.1 +--- +!!str "foo\n" diff --git a/_test/data/spec-08-08.data b/_test/data/spec-08-08.data new file mode 100644 index 0000000..757a93d --- /dev/null +++ b/_test/data/spec-08-08.data @@ -0,0 +1,13 @@ +--- +foo: + "bar + baz" +--- +"foo + bar" +--- +foo + bar +--- | + foo +... diff --git a/_test/data/spec-08-09.canonical b/_test/data/spec-08-09.canonical new file mode 100644 index 0000000..3805daf --- /dev/null +++ b/_test/data/spec-08-09.canonical @@ -0,0 +1,21 @@ +%YAML 1.1 +--- !!map { + ? !!str "scalars" : !!map { + ? !!str "plain" + : !!str "some text", + ? !!str "quoted" + : !!map { + ? !!str "single" + : !!str "some text", + ? !!str "double" + : !!str "some text" + } }, + ? !!str "collections" : !!map { + ? !!str "sequence" : !!seq [ + !!str "entry", + !!map { + ? !!str "key" : !!str "value" + } ], + ? !!str "mapping" : !!map { + ? 
!!str "key" : !!str "value" +} } } diff --git a/_test/data/spec-08-09.data b/_test/data/spec-08-09.data new file mode 100644 index 0000000..69da042 --- /dev/null +++ b/_test/data/spec-08-09.data @@ -0,0 +1,11 @@ +--- +scalars: + plain: !!str some text + quoted: + single: 'some text' + double: "some text" +collections: + sequence: !!seq [ !!str entry, + # Mapping entry: + key: value ] + mapping: { key: value } diff --git a/_test/data/spec-08-10.canonical b/_test/data/spec-08-10.canonical new file mode 100644 index 0000000..8281c5e --- /dev/null +++ b/_test/data/spec-08-10.canonical @@ -0,0 +1,23 @@ +%YAML 1.1 +--- +!!map { + ? !!str "block styles" : !!map { + ? !!str "scalars" : !!map { + ? !!str "literal" + : !!str "#!/usr/bin/perl\n\ + print \"Hello, + world!\\n\";\n", + ? !!str "folded" + : !!str "This sentence + is false.\n" + }, + ? !!str "collections" : !!map { + ? !!str "sequence" : !!seq [ + !!str "entry", + !!map { + ? !!str "key" : !!str "value" + } + ], + ? !!str "mapping" : !!map { + ? !!str "key" : !!str "value" +} } } } diff --git a/_test/data/spec-08-10.data b/_test/data/spec-08-10.data new file mode 100644 index 0000000..72acc56 --- /dev/null +++ b/_test/data/spec-08-10.data @@ -0,0 +1,15 @@ +block styles: + scalars: + literal: !!str | + #!/usr/bin/perl + print "Hello, world!\n"; + folded: > + This sentence + is false. + collections: !!map + sequence: !!seq # Entry: + - entry # Plain + # Mapping entry: + - key: value + mapping: + key: value diff --git a/_test/data/spec-08-11.canonical b/_test/data/spec-08-11.canonical new file mode 100644 index 0000000..dd6f76e --- /dev/null +++ b/_test/data/spec-08-11.canonical @@ -0,0 +1,8 @@ +%YAML 1.1 +--- +!!map { + ? !!str "First occurrence" + : &A !!str "Value", + ? 
!!str "Second occurrence" + : *A +} diff --git a/_test/data/spec-08-11.data b/_test/data/spec-08-11.data new file mode 100644 index 0000000..600d179 --- /dev/null +++ b/_test/data/spec-08-11.data @@ -0,0 +1,2 @@ +First occurrence: &anchor Value +Second occurrence: *anchor diff --git a/_test/data/spec-08-12.canonical b/_test/data/spec-08-12.canonical new file mode 100644 index 0000000..93899f4 --- /dev/null +++ b/_test/data/spec-08-12.canonical @@ -0,0 +1,10 @@ +%YAML 1.1 +--- +!!seq [ + !!str "Without properties", + &A !!str "Anchored", + !!str "Tagged", + *A, + !!str "", + !!str "", +] diff --git a/_test/data/spec-08-12.data b/_test/data/spec-08-12.data new file mode 100644 index 0000000..3d4c6b7 --- /dev/null +++ b/_test/data/spec-08-12.data @@ -0,0 +1,8 @@ +[ + Without properties, + &anchor "Anchored", + !!str 'Tagged', + *anchor, # Alias node + !!str , # Empty plain scalar + '', # Empty plain scalar +] diff --git a/_test/data/spec-08-13.canonical b/_test/data/spec-08-13.canonical new file mode 100644 index 0000000..618bb7b --- /dev/null +++ b/_test/data/spec-08-13.canonical @@ -0,0 +1,10 @@ +%YAML 1.1 +--- +!!map { + ? !!str "foo" +# : !!str "", +# ? !!str "" + : !!null "", + ? !!null "" + : !!str "bar", +} diff --git a/_test/data/spec-08-13.data b/_test/data/spec-08-13.data new file mode 100644 index 0000000..ebe663a --- /dev/null +++ b/_test/data/spec-08-13.data @@ -0,0 +1,4 @@ +{ + ? foo :, + ? : bar, +} diff --git a/_test/data/spec-08-13.skip-ext b/_test/data/spec-08-13.skip-ext new file mode 100644 index 0000000..e69de29 diff --git a/_test/data/spec-08-14.canonical b/_test/data/spec-08-14.canonical new file mode 100644 index 0000000..11db439 --- /dev/null +++ b/_test/data/spec-08-14.canonical @@ -0,0 +1,10 @@ +%YAML 1.1 +--- +!!seq [ + !!str "flow in block", + !!str "Block scalar\n", + !!map { + ? 
!!str "foo" + : !!str "bar" + } +] diff --git a/_test/data/spec-08-14.data b/_test/data/spec-08-14.data new file mode 100644 index 0000000..2fbb1f7 --- /dev/null +++ b/_test/data/spec-08-14.data @@ -0,0 +1,5 @@ +- "flow in block" +- > + Block scalar +- !!map # Block collection + foo : bar diff --git a/_test/data/spec-08-15.canonical b/_test/data/spec-08-15.canonical new file mode 100644 index 0000000..76f028e --- /dev/null +++ b/_test/data/spec-08-15.canonical @@ -0,0 +1,11 @@ +%YAML 1.1 +--- +!!seq [ + !!null "", + !!map { + ? !!str "foo" + : !!null "", + ? !!null "" + : !!str "bar", + } +] diff --git a/_test/data/spec-08-15.data b/_test/data/spec-08-15.data new file mode 100644 index 0000000..7c86bcf --- /dev/null +++ b/_test/data/spec-08-15.data @@ -0,0 +1,5 @@ +- # Empty plain scalar +- ? foo + : + ? + : bar diff --git a/_test/data/spec-09-01.canonical b/_test/data/spec-09-01.canonical new file mode 100644 index 0000000..e71a548 --- /dev/null +++ b/_test/data/spec-09-01.canonical @@ -0,0 +1,11 @@ +%YAML 1.1 +--- +!!map { + ? !!str "simple key" + : !!map { + ? !!str "also simple" + : !!str "value", + ? !!str "not a simple key" + : !!str "any value" + } +} diff --git a/_test/data/spec-09-01.data b/_test/data/spec-09-01.data new file mode 100644 index 0000000..9e83eaf --- /dev/null +++ b/_test/data/spec-09-01.data @@ -0,0 +1,6 @@ +"simple key" : { + "also simple" : value, + ? "not a + simple key" : "any + value" +} diff --git a/_test/data/spec-09-02.canonical b/_test/data/spec-09-02.canonical new file mode 100644 index 0000000..6f8f41a --- /dev/null +++ b/_test/data/spec-09-02.canonical @@ -0,0 +1,7 @@ +%YAML 1.1 +--- +!!str "as space \ + trimmed\n\ + specific\L\n\ + escaped\t\n\ + none" diff --git a/_test/data/spec-09-02.data b/_test/data/spec-09-02.data new file mode 100644 index 0000000..d84883d --- /dev/null +++ b/_test/data/spec-09-02.data @@ -0,0 +1,6 @@ + "as space + trimmed + + specific
 + escaped \
 + none" diff --git a/_test/data/spec-09-03.canonical b/_test/data/spec-09-03.canonical new file mode 100644 index 0000000..658c6df --- /dev/null +++ b/_test/data/spec-09-03.canonical @@ -0,0 +1,7 @@ +%YAML 1.1 +--- +!!seq [ + !!str " last", + !!str " last", + !!str " \tfirst last", +] diff --git a/_test/data/spec-09-03.data b/_test/data/spec-09-03.data new file mode 100644 index 0000000..e0b914d --- /dev/null +++ b/_test/data/spec-09-03.data @@ -0,0 +1,6 @@ +- " + last" +- " + last" +- " first + last" diff --git a/_test/data/spec-09-04.canonical b/_test/data/spec-09-04.canonical new file mode 100644 index 0000000..fa46632 --- /dev/null +++ b/_test/data/spec-09-04.canonical @@ -0,0 +1,6 @@ +%YAML 1.1 +--- +!!str "first \ + inner 1 \ + inner 2 \ + last" diff --git a/_test/data/spec-09-04.data b/_test/data/spec-09-04.data new file mode 100644 index 0000000..313a91b --- /dev/null +++ b/_test/data/spec-09-04.data @@ -0,0 +1,4 @@ + "first + inner 1 + \ inner 2 \ + last" diff --git a/_test/data/spec-09-05.canonical b/_test/data/spec-09-05.canonical new file mode 100644 index 0000000..24d1052 --- /dev/null +++ b/_test/data/spec-09-05.canonical @@ -0,0 +1,7 @@ +%YAML 1.1 +--- +!!seq [ + !!str "first ", + !!str "first\nlast", + !!str "first inner \tlast", +] diff --git a/_test/data/spec-09-05.data b/_test/data/spec-09-05.data new file mode 100644 index 0000000..624c30e --- /dev/null +++ b/_test/data/spec-09-05.data @@ -0,0 +1,8 @@ +- "first + " +- "first + + last" +- "first + inner + \ last" diff --git a/_test/data/spec-09-06.canonical b/_test/data/spec-09-06.canonical new file mode 100644 index 0000000..5028772 --- /dev/null +++ b/_test/data/spec-09-06.canonical @@ -0,0 +1,3 @@ +%YAML 1.1 +--- +!!str "here's to \"quotes\"" diff --git a/_test/data/spec-09-06.data b/_test/data/spec-09-06.data new file mode 100644 index 0000000..b038078 --- /dev/null +++ b/_test/data/spec-09-06.data @@ -0,0 +1 @@ + 'here''s to "quotes"' diff --git a/_test/data/spec-09-07.canonical 
b/_test/data/spec-09-07.canonical new file mode 100644 index 0000000..e71a548 --- /dev/null +++ b/_test/data/spec-09-07.canonical @@ -0,0 +1,11 @@ +%YAML 1.1 +--- +!!map { + ? !!str "simple key" + : !!map { + ? !!str "also simple" + : !!str "value", + ? !!str "not a simple key" + : !!str "any value" + } +} diff --git a/_test/data/spec-09-07.data b/_test/data/spec-09-07.data new file mode 100644 index 0000000..755b54a --- /dev/null +++ b/_test/data/spec-09-07.data @@ -0,0 +1,6 @@ +'simple key' : { + 'also simple' : value, + ? 'not a + simple key' : 'any + value' +} diff --git a/_test/data/spec-09-08.canonical b/_test/data/spec-09-08.canonical new file mode 100644 index 0000000..06abdb5 --- /dev/null +++ b/_test/data/spec-09-08.canonical @@ -0,0 +1,6 @@ +%YAML 1.1 +--- +!!str "as space \ + trimmed\n\ + specific\L\n\ + none" diff --git a/_test/data/spec-09-08.data b/_test/data/spec-09-08.data new file mode 100644 index 0000000..aa4d458 --- /dev/null +++ b/_test/data/spec-09-08.data @@ -0,0 +1 @@ + 'as space Â… trimmed Â…Â… specific
… none' diff --git a/_test/data/spec-09-09.canonical b/_test/data/spec-09-09.canonical new file mode 100644 index 0000000..658c6df --- /dev/null +++ b/_test/data/spec-09-09.canonical @@ -0,0 +1,7 @@ +%YAML 1.1 +--- +!!seq [ + !!str " last", + !!str " last", + !!str " \tfirst last", +] diff --git a/_test/data/spec-09-09.data b/_test/data/spec-09-09.data new file mode 100644 index 0000000..52171df --- /dev/null +++ b/_test/data/spec-09-09.data @@ -0,0 +1,6 @@ +- ' + last' +- ' + last' +- ' first + last' diff --git a/_test/data/spec-09-10.canonical b/_test/data/spec-09-10.canonical new file mode 100644 index 0000000..2028d04 --- /dev/null +++ b/_test/data/spec-09-10.canonical @@ -0,0 +1,5 @@ +%YAML 1.1 +--- +!!str "first \ + inner \ + last" diff --git a/_test/data/spec-09-10.data b/_test/data/spec-09-10.data new file mode 100644 index 0000000..0e41449 --- /dev/null +++ b/_test/data/spec-09-10.data @@ -0,0 +1,3 @@ + 'first + inner + last' diff --git a/_test/data/spec-09-11.canonical b/_test/data/spec-09-11.canonical new file mode 100644 index 0000000..4eb222c --- /dev/null +++ b/_test/data/spec-09-11.canonical @@ -0,0 +1,6 @@ +%YAML 1.1 +--- +!!seq [ + !!str "first ", + !!str "first\nlast", +] diff --git a/_test/data/spec-09-11.data b/_test/data/spec-09-11.data new file mode 100644 index 0000000..5efa873 --- /dev/null +++ b/_test/data/spec-09-11.data @@ -0,0 +1,5 @@ +- 'first + ' +- 'first + + last' diff --git a/_test/data/spec-09-12.canonical b/_test/data/spec-09-12.canonical new file mode 100644 index 0000000..d8e6dce --- /dev/null +++ b/_test/data/spec-09-12.canonical @@ -0,0 +1,12 @@ +%YAML 1.1 +--- +!!seq [ + !!str "::std::vector", + !!str "Up, up, and away!", + !!int "-123", + !!seq [ + !!str "::std::vector", + !!str "Up, up, and away!", + !!int "-123", + ] +] diff --git a/_test/data/spec-09-12.data b/_test/data/spec-09-12.data new file mode 100644 index 0000000..b9a3ac5 --- /dev/null +++ b/_test/data/spec-09-12.data @@ -0,0 +1,8 @@ +# Outside flow collection: +- 
::std::vector +- Up, up, and away! +- -123 +# Inside flow collection: +- [ '::std::vector', + "Up, up, and away!", + -123 ] diff --git a/_test/data/spec-09-13.canonical b/_test/data/spec-09-13.canonical new file mode 100644 index 0000000..e71a548 --- /dev/null +++ b/_test/data/spec-09-13.canonical @@ -0,0 +1,11 @@ +%YAML 1.1 +--- +!!map { + ? !!str "simple key" + : !!map { + ? !!str "also simple" + : !!str "value", + ? !!str "not a simple key" + : !!str "any value" + } +} diff --git a/_test/data/spec-09-13.data b/_test/data/spec-09-13.data new file mode 100644 index 0000000..b156386 --- /dev/null +++ b/_test/data/spec-09-13.data @@ -0,0 +1,6 @@ +simple key : { + also simple : value, + ? not a + simple key : any + value +} diff --git a/_test/data/spec-09-14.data b/_test/data/spec-09-14.data new file mode 100644 index 0000000..97f2316 --- /dev/null +++ b/_test/data/spec-09-14.data @@ -0,0 +1,14 @@ +--- +--- ||| : foo +... >>>: bar +--- +[ +--- +, +... , +{ +--- : +... # Nested +} +] +... diff --git a/_test/data/spec-09-14.error b/_test/data/spec-09-14.error new file mode 100644 index 0000000..9f3db7b --- /dev/null +++ b/_test/data/spec-09-14.error @@ -0,0 +1,6 @@ +ERROR: + The --- and ... document + start and end markers must + not be specified as the + first content line of a + non-indented plain scalar. diff --git a/_test/data/spec-09-15.canonical b/_test/data/spec-09-15.canonical new file mode 100644 index 0000000..df02040 --- /dev/null +++ b/_test/data/spec-09-15.canonical @@ -0,0 +1,18 @@ +%YAML 1.1 +--- +!!map { + ? !!str "---" + : !!str "foo", + ? !!str "..." + : !!str "bar" +} +%YAML 1.1 +--- +!!seq [ + !!str "---", + !!str "...", + !!map { + ? !!str "---" + : !!str "..." + } +] diff --git a/_test/data/spec-09-15.data b/_test/data/spec-09-15.data new file mode 100644 index 0000000..e6863b0 --- /dev/null +++ b/_test/data/spec-09-15.data @@ -0,0 +1,13 @@ +--- +"---" : foo +...: bar +--- +[ +---, +..., +{ +? --- +: ... +} +] +... 
diff --git a/_test/data/spec-09-16.canonical b/_test/data/spec-09-16.canonical new file mode 100644 index 0000000..06abdb5 --- /dev/null +++ b/_test/data/spec-09-16.canonical @@ -0,0 +1,6 @@ +%YAML 1.1 +--- +!!str "as space \ + trimmed\n\ + specific\L\n\ + none" diff --git a/_test/data/spec-09-16.data b/_test/data/spec-09-16.data new file mode 100644 index 0000000..473beb9 --- /dev/null +++ b/_test/data/spec-09-16.data @@ -0,0 +1,3 @@ +# Tabs are confusing: +# as space/trimmed/specific/none + as space Â… trimmed Â…Â… specific
… none diff --git a/_test/data/spec-09-17.canonical b/_test/data/spec-09-17.canonical new file mode 100644 index 0000000..68cb70d --- /dev/null +++ b/_test/data/spec-09-17.canonical @@ -0,0 +1,4 @@ +%YAML 1.1 +--- +!!str "first line\n\ + more line" diff --git a/_test/data/spec-09-17.data b/_test/data/spec-09-17.data new file mode 100644 index 0000000..97bc46c --- /dev/null +++ b/_test/data/spec-09-17.data @@ -0,0 +1,3 @@ + first line + + more line diff --git a/_test/data/spec-09-18.canonical b/_test/data/spec-09-18.canonical new file mode 100644 index 0000000..f21428f --- /dev/null +++ b/_test/data/spec-09-18.canonical @@ -0,0 +1,8 @@ +%YAML 1.1 +--- +!!seq [ + !!str "literal\n", + !!str " folded\n", + !!str "keep\n\n", + !!str " strip", +] diff --git a/_test/data/spec-09-18.data b/_test/data/spec-09-18.data new file mode 100644 index 0000000..68c5d7c --- /dev/null +++ b/_test/data/spec-09-18.data @@ -0,0 +1,9 @@ +- | # Just the style + literal +- >1 # Indentation indicator + folded +- |+ # Chomping indicator + keep + +- >-1 # Both indicators + strip diff --git a/_test/data/spec-09-19.canonical b/_test/data/spec-09-19.canonical new file mode 100644 index 0000000..3e828d7 --- /dev/null +++ b/_test/data/spec-09-19.canonical @@ -0,0 +1,6 @@ +%YAML 1.1 +--- +!!seq [ + !!str "literal\n", + !!str "folded\n", +] diff --git a/_test/data/spec-09-19.data b/_test/data/spec-09-19.data new file mode 100644 index 0000000..f0e589d --- /dev/null +++ b/_test/data/spec-09-19.data @@ -0,0 +1,4 @@ +- | + literal +- > + folded diff --git a/_test/data/spec-09-20.canonical b/_test/data/spec-09-20.canonical new file mode 100644 index 0000000..d03bef5 --- /dev/null +++ b/_test/data/spec-09-20.canonical @@ -0,0 +1,8 @@ +%YAML 1.1 +--- +!!seq [ + !!str "detected\n", + !!str "\n\n# detected\n", + !!str " explicit\n", + !!str "\t\ndetected\n", +] diff --git a/_test/data/spec-09-20.data b/_test/data/spec-09-20.data new file mode 100644 index 0000000..39bee04 --- /dev/null +++ 
b/_test/data/spec-09-20.data @@ -0,0 +1,11 @@ +- | + detected +- > + + + # detected +- |1 + explicit +- > + + detected diff --git a/_test/data/spec-09-20.skip-ext b/_test/data/spec-09-20.skip-ext new file mode 100644 index 0000000..e69de29 diff --git a/_test/data/spec-09-21.data b/_test/data/spec-09-21.data new file mode 100644 index 0000000..0fdd14f --- /dev/null +++ b/_test/data/spec-09-21.data @@ -0,0 +1,8 @@ +- | + + text +- > + text + text +- |1 + text diff --git a/_test/data/spec-09-21.error b/_test/data/spec-09-21.error new file mode 100644 index 0000000..1379ca5 --- /dev/null +++ b/_test/data/spec-09-21.error @@ -0,0 +1,7 @@ +ERROR: +- A leading all-space line must + not have too many spaces. +- A following text line must + not be less indented. +- The text is less indented + than the indicated level. diff --git a/_test/data/spec-09-22.canonical b/_test/data/spec-09-22.canonical new file mode 100644 index 0000000..c1bbcd2 --- /dev/null +++ b/_test/data/spec-09-22.canonical @@ -0,0 +1,10 @@ +%YAML 1.1 +--- +!!map { + ? !!str "strip" + : !!str "text", + ? !!str "clip" + : !!str "text\n", + ? !!str "keep" + : !!str "text\L", +} diff --git a/_test/data/spec-09-22.data b/_test/data/spec-09-22.data new file mode 100644 index 0000000..0dd51eb --- /dev/null +++ b/_test/data/spec-09-22.data @@ -0,0 +1,4 @@ +strip: |- + text
clip: | + textÂ…keep: |+ + text
 \ No newline at end of file diff --git a/_test/data/spec-09-23.canonical b/_test/data/spec-09-23.canonical new file mode 100644 index 0000000..c4444ca --- /dev/null +++ b/_test/data/spec-09-23.canonical @@ -0,0 +1,10 @@ +%YAML 1.1 +--- +!!map { + ? !!str "strip" + : !!str "# text", + ? !!str "clip" + : !!str "# text\n", + ? !!str "keep" + : !!str "# text\L\n", +} diff --git a/_test/data/spec-09-23.data b/_test/data/spec-09-23.data new file mode 100644 index 0000000..8972d2b --- /dev/null +++ b/_test/data/spec-09-23.data @@ -0,0 +1,11 @@ + # Strip + # Comments: +strip: |- + # text
 
 # Clip + # comments: +Â…clip: | + # textÂ… 
 # Keep + # comments: +Â…keep: |+ + # text
… # Trail + # comments. diff --git a/_test/data/spec-09-24.canonical b/_test/data/spec-09-24.canonical new file mode 100644 index 0000000..45a99b0 --- /dev/null +++ b/_test/data/spec-09-24.canonical @@ -0,0 +1,10 @@ +%YAML 1.1 +--- +!!map { + ? !!str "strip" + : !!str "", + ? !!str "clip" + : !!str "", + ? !!str "keep" + : !!str "\n", +} diff --git a/_test/data/spec-09-24.data b/_test/data/spec-09-24.data new file mode 100644 index 0000000..de0b64b --- /dev/null +++ b/_test/data/spec-09-24.data @@ -0,0 +1,6 @@ +strip: >- + +clip: > + +keep: |+ + diff --git a/_test/data/spec-09-25.canonical b/_test/data/spec-09-25.canonical new file mode 100644 index 0000000..9d2327b --- /dev/null +++ b/_test/data/spec-09-25.canonical @@ -0,0 +1,4 @@ +%YAML 1.1 +--- +!!str "literal\n\ + \ttext\n" diff --git a/_test/data/spec-09-25.data b/_test/data/spec-09-25.data new file mode 100644 index 0000000..f6303a1 --- /dev/null +++ b/_test/data/spec-09-25.data @@ -0,0 +1,3 @@ +| # Simple block scalar + literal + text diff --git a/_test/data/spec-09-26.canonical b/_test/data/spec-09-26.canonical new file mode 100644 index 0000000..3029a11 --- /dev/null +++ b/_test/data/spec-09-26.canonical @@ -0,0 +1,3 @@ +%YAML 1.1 +--- +!!str "\n\nliteral\n\ntext\n" diff --git a/_test/data/spec-09-26.data b/_test/data/spec-09-26.data new file mode 100644 index 0000000..f28555a --- /dev/null +++ b/_test/data/spec-09-26.data @@ -0,0 +1,8 @@ +| + + + literal + + text + + # Comment diff --git a/_test/data/spec-09-27.canonical b/_test/data/spec-09-27.canonical new file mode 100644 index 0000000..3029a11 --- /dev/null +++ b/_test/data/spec-09-27.canonical @@ -0,0 +1,3 @@ +%YAML 1.1 +--- +!!str "\n\nliteral\n\ntext\n" diff --git a/_test/data/spec-09-27.data b/_test/data/spec-09-27.data new file mode 100644 index 0000000..f28555a --- /dev/null +++ b/_test/data/spec-09-27.data @@ -0,0 +1,8 @@ +| + + + literal + + text + + # Comment diff --git a/_test/data/spec-09-28.canonical b/_test/data/spec-09-28.canonical new 
file mode 100644 index 0000000..3029a11 --- /dev/null +++ b/_test/data/spec-09-28.canonical @@ -0,0 +1,3 @@ +%YAML 1.1 +--- +!!str "\n\nliteral\n\ntext\n" diff --git a/_test/data/spec-09-28.data b/_test/data/spec-09-28.data new file mode 100644 index 0000000..f28555a --- /dev/null +++ b/_test/data/spec-09-28.data @@ -0,0 +1,8 @@ +| + + + literal + + text + + # Comment diff --git a/_test/data/spec-09-29.canonical b/_test/data/spec-09-29.canonical new file mode 100644 index 0000000..0980789 --- /dev/null +++ b/_test/data/spec-09-29.canonical @@ -0,0 +1,4 @@ +%YAML 1.1 +--- +!!str "folded text\n\ + \tlines\n" diff --git a/_test/data/spec-09-29.data b/_test/data/spec-09-29.data new file mode 100644 index 0000000..82e611f --- /dev/null +++ b/_test/data/spec-09-29.data @@ -0,0 +1,4 @@ +> # Simple folded scalar + folded + text + lines diff --git a/_test/data/spec-09-30.canonical b/_test/data/spec-09-30.canonical new file mode 100644 index 0000000..fc37db1 --- /dev/null +++ b/_test/data/spec-09-30.canonical @@ -0,0 +1,7 @@ +%YAML 1.1 +--- +!!str "folded line\n\ + next line\n\n\ + \ * bullet\n\ + \ * list\n\n\ + last line\n" diff --git a/_test/data/spec-09-30.data b/_test/data/spec-09-30.data new file mode 100644 index 0000000..a4d8c36 --- /dev/null +++ b/_test/data/spec-09-30.data @@ -0,0 +1,14 @@ +> + folded + line + + next + line + + * bullet + * list + + last + line + +# Comment diff --git a/_test/data/spec-09-31.canonical b/_test/data/spec-09-31.canonical new file mode 100644 index 0000000..fc37db1 --- /dev/null +++ b/_test/data/spec-09-31.canonical @@ -0,0 +1,7 @@ +%YAML 1.1 +--- +!!str "folded line\n\ + next line\n\n\ + \ * bullet\n\ + \ * list\n\n\ + last line\n" diff --git a/_test/data/spec-09-31.data b/_test/data/spec-09-31.data new file mode 100644 index 0000000..a4d8c36 --- /dev/null +++ b/_test/data/spec-09-31.data @@ -0,0 +1,14 @@ +> + folded + line + + next + line + + * bullet + * list + + last + line + +# Comment diff --git a/_test/data/spec-09-32.canonical 
b/_test/data/spec-09-32.canonical new file mode 100644 index 0000000..fc37db1 --- /dev/null +++ b/_test/data/spec-09-32.canonical @@ -0,0 +1,7 @@ +%YAML 1.1 +--- +!!str "folded line\n\ + next line\n\n\ + \ * bullet\n\ + \ * list\n\n\ + last line\n" diff --git a/_test/data/spec-09-32.data b/_test/data/spec-09-32.data new file mode 100644 index 0000000..a4d8c36 --- /dev/null +++ b/_test/data/spec-09-32.data @@ -0,0 +1,14 @@ +> + folded + line + + next + line + + * bullet + * list + + last + line + +# Comment diff --git a/_test/data/spec-09-33.canonical b/_test/data/spec-09-33.canonical new file mode 100644 index 0000000..fc37db1 --- /dev/null +++ b/_test/data/spec-09-33.canonical @@ -0,0 +1,7 @@ +%YAML 1.1 +--- +!!str "folded line\n\ + next line\n\n\ + \ * bullet\n\ + \ * list\n\n\ + last line\n" diff --git a/_test/data/spec-09-33.data b/_test/data/spec-09-33.data new file mode 100644 index 0000000..a4d8c36 --- /dev/null +++ b/_test/data/spec-09-33.data @@ -0,0 +1,14 @@ +> + folded + line + + next + line + + * bullet + * list + + last + line + +# Comment diff --git a/_test/data/spec-10-01.canonical b/_test/data/spec-10-01.canonical new file mode 100644 index 0000000..d08cdd4 --- /dev/null +++ b/_test/data/spec-10-01.canonical @@ -0,0 +1,12 @@ +%YAML 1.1 +--- +!!seq [ + !!seq [ + !!str "inner", + !!str "inner", + ], + !!seq [ + !!str "inner", + !!str "last", + ], +] diff --git a/_test/data/spec-10-01.data b/_test/data/spec-10-01.data new file mode 100644 index 0000000..e668d38 --- /dev/null +++ b/_test/data/spec-10-01.data @@ -0,0 +1,2 @@ +- [ inner, inner, ] +- [inner,last] diff --git a/_test/data/spec-10-02.canonical b/_test/data/spec-10-02.canonical new file mode 100644 index 0000000..82fe0d9 --- /dev/null +++ b/_test/data/spec-10-02.canonical @@ -0,0 +1,14 @@ +%YAML 1.1 +--- +!!seq [ + !!str "double quoted", + !!str "single quoted", + !!str "plain text", + !!seq [ + !!str "nested", + ], + !!map { + ? 
!!str "single" + : !!str "pair" + } +] diff --git a/_test/data/spec-10-02.data b/_test/data/spec-10-02.data new file mode 100644 index 0000000..3b23351 --- /dev/null +++ b/_test/data/spec-10-02.data @@ -0,0 +1,8 @@ +[ +"double + quoted", 'single + quoted', +plain + text, [ nested ], +single: pair , +] diff --git a/_test/data/spec-10-03.canonical b/_test/data/spec-10-03.canonical new file mode 100644 index 0000000..1443395 --- /dev/null +++ b/_test/data/spec-10-03.canonical @@ -0,0 +1,12 @@ +%YAML 1.1 +--- +!!map { + ? !!str "block" + : !!seq [ + !!str "one", + !!map { + ? !!str "two" + : !!str "three" + } + ] +} diff --git a/_test/data/spec-10-03.data b/_test/data/spec-10-03.data new file mode 100644 index 0000000..9e15f83 --- /dev/null +++ b/_test/data/spec-10-03.data @@ -0,0 +1,4 @@ +block: # Block + # sequence +- one +- two : three diff --git a/_test/data/spec-10-04.canonical b/_test/data/spec-10-04.canonical new file mode 100644 index 0000000..ae486a3 --- /dev/null +++ b/_test/data/spec-10-04.canonical @@ -0,0 +1,11 @@ +%YAML 1.1 +--- +!!map { + ? !!str "block" + : !!seq [ + !!str "one", + !!seq [ + !!str "two" + ] + ] +} diff --git a/_test/data/spec-10-04.data b/_test/data/spec-10-04.data new file mode 100644 index 0000000..2905b0d --- /dev/null +++ b/_test/data/spec-10-04.data @@ -0,0 +1,4 @@ +block: +- one +- + - two diff --git a/_test/data/spec-10-05.canonical b/_test/data/spec-10-05.canonical new file mode 100644 index 0000000..07cc0c9 --- /dev/null +++ b/_test/data/spec-10-05.canonical @@ -0,0 +1,14 @@ +%YAML 1.1 +--- +!!seq [ + !!null "", + !!str "block node\n", + !!seq [ + !!str "one", + !!str "two", + ], + !!map { + ? 
!!str "one" + : !!str "two", + } +] diff --git a/_test/data/spec-10-05.data b/_test/data/spec-10-05.data new file mode 100644 index 0000000..f19a99e --- /dev/null +++ b/_test/data/spec-10-05.data @@ -0,0 +1,7 @@ +- # Empty +- | + block node +- - one # in-line + - two # sequence +- one: two # in-line + # mapping diff --git a/_test/data/spec-10-06.canonical b/_test/data/spec-10-06.canonical new file mode 100644 index 0000000..d9986c2 --- /dev/null +++ b/_test/data/spec-10-06.canonical @@ -0,0 +1,16 @@ +%YAML 1.1 +--- +!!seq [ + !!map { + ? !!str "inner" + : !!str "entry", + ? !!str "also" + : !!str "inner" + }, + !!map { + ? !!str "inner" + : !!str "entry", + ? !!str "last" + : !!str "entry" + } +] diff --git a/_test/data/spec-10-06.data b/_test/data/spec-10-06.data new file mode 100644 index 0000000..860ba25 --- /dev/null +++ b/_test/data/spec-10-06.data @@ -0,0 +1,2 @@ +- { inner : entry , also: inner , } +- {inner: entry,last : entry} diff --git a/_test/data/spec-10-07.canonical b/_test/data/spec-10-07.canonical new file mode 100644 index 0000000..ec74230 --- /dev/null +++ b/_test/data/spec-10-07.canonical @@ -0,0 +1,16 @@ +%YAML 1.1 +--- +!!map { + ? !!null "" + : !!str "value", + ? !!str "explicit key" + : !!str "value", + ? !!str "simple key" + : !!str "value", + ? !!seq [ + !!str "collection", + !!str "simple", + !!str "key" + ] + : !!str "value" +} diff --git a/_test/data/spec-10-07.data b/_test/data/spec-10-07.data new file mode 100644 index 0000000..ff943fb --- /dev/null +++ b/_test/data/spec-10-07.data @@ -0,0 +1,7 @@ +{ +? : value, # Empty key +? 
explicit + key: value, +simple key : value, +[ collection, simple, key ]: value +} diff --git a/_test/data/spec-10-08.data b/_test/data/spec-10-08.data new file mode 100644 index 0000000..55bd788 --- /dev/null +++ b/_test/data/spec-10-08.data @@ -0,0 +1,5 @@ +{ +multi-line + simple key : value, +very long ...................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................(>1KB)....................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................
............................................................................................................................................................................................................................................................................................................................................................................... key: value +} diff --git a/_test/data/spec-10-08.error b/_test/data/spec-10-08.error new file mode 100644 index 0000000..3979e1f --- /dev/null +++ b/_test/data/spec-10-08.error @@ -0,0 +1,5 @@ +ERROR: +- A simple key is restricted + to only one line. +- A simple key must not be + longer than 1024 characters. diff --git a/_test/data/spec-10-09.canonical b/_test/data/spec-10-09.canonical new file mode 100644 index 0000000..4d9827b --- /dev/null +++ b/_test/data/spec-10-09.canonical @@ -0,0 +1,8 @@ +%YAML 1.1 +--- +!!map { + ? !!str "key" + : !!str "value", + ? !!str "empty" + : !!null "", +} diff --git a/_test/data/spec-10-09.data b/_test/data/spec-10-09.data new file mode 100644 index 0000000..4d55e21 --- /dev/null +++ b/_test/data/spec-10-09.data @@ -0,0 +1,4 @@ +{ +key : value, +empty: # empty value↓ +} diff --git a/_test/data/spec-10-10.canonical b/_test/data/spec-10-10.canonical new file mode 100644 index 0000000..016fb64 --- /dev/null +++ b/_test/data/spec-10-10.canonical @@ -0,0 +1,16 @@ +%YAML 1.1 +--- +!!map { + ? !!str "explicit key1" + : !!str "explicit value", + ? !!str "explicit key2" + : !!null "", + ? !!str "explicit key3" + : !!null "", + ? !!str "simple key1" + : !!str "explicit value", + ? !!str "simple key2" + : !!null "", + ? !!str "simple key3" + : !!null "", +} diff --git a/_test/data/spec-10-10.data b/_test/data/spec-10-10.data new file mode 100644 index 0000000..0888b05 --- /dev/null +++ b/_test/data/spec-10-10.data @@ -0,0 +1,8 @@ +{ +? explicit key1 : explicit value, +? explicit key2 : , # Explicit empty +? 
explicit key3, # Empty value +simple key1 : explicit value, +simple key2 : , # Explicit empty +simple key3, # Empty value +} diff --git a/_test/data/spec-10-11.canonical b/_test/data/spec-10-11.canonical new file mode 100644 index 0000000..7309544 --- /dev/null +++ b/_test/data/spec-10-11.canonical @@ -0,0 +1,24 @@ +%YAML 1.1 +--- +!!seq [ + !!map { + ? !!str "explicit key1" + : !!str "explicit value", + }, + !!map { + ? !!str "explicit key2" + : !!null "", + }, + !!map { + ? !!str "explicit key3" + : !!null "", + }, + !!map { + ? !!str "simple key1" + : !!str "explicit value", + }, + !!map { + ? !!str "simple key2" + : !!null "", + }, +] diff --git a/_test/data/spec-10-11.data b/_test/data/spec-10-11.data new file mode 100644 index 0000000..9f05568 --- /dev/null +++ b/_test/data/spec-10-11.data @@ -0,0 +1,7 @@ +[ +? explicit key1 : explicit value, +? explicit key2 : , # Explicit empty +? explicit key3, # Implicit empty +simple key1 : explicit value, +simple key2 : , # Explicit empty +] diff --git a/_test/data/spec-10-12.canonical b/_test/data/spec-10-12.canonical new file mode 100644 index 0000000..a95dd40 --- /dev/null +++ b/_test/data/spec-10-12.canonical @@ -0,0 +1,9 @@ +%YAML 1.1 +--- +!!map { + ? !!str "block" + : !!map { + ? !!str "key" + : !!str "value" + } +} diff --git a/_test/data/spec-10-12.data b/_test/data/spec-10-12.data new file mode 100644 index 0000000..5521443 --- /dev/null +++ b/_test/data/spec-10-12.data @@ -0,0 +1,3 @@ +block: # Block + # mapping + key: value diff --git a/_test/data/spec-10-13.canonical b/_test/data/spec-10-13.canonical new file mode 100644 index 0000000..e183c50 --- /dev/null +++ b/_test/data/spec-10-13.canonical @@ -0,0 +1,11 @@ +%YAML 1.1 +--- +!!map { + ? !!str "explicit key" + : !!null "", + ? 
!!str "block key\n" + : !!seq [ + !!str "one", + !!str "two", + ] +} diff --git a/_test/data/spec-10-13.data b/_test/data/spec-10-13.data new file mode 100644 index 0000000..b5b97db --- /dev/null +++ b/_test/data/spec-10-13.data @@ -0,0 +1,5 @@ +? explicit key # implicit value +? | + block key +: - one # explicit in-line + - two # block value diff --git a/_test/data/spec-10-14.canonical b/_test/data/spec-10-14.canonical new file mode 100644 index 0000000..e87c880 --- /dev/null +++ b/_test/data/spec-10-14.canonical @@ -0,0 +1,11 @@ +%YAML 1.1 +--- +!!map { + ? !!str "plain key" + : !!null "", + ? !!str "quoted key" + : !!seq [ + !!str "one", + !!str "two", + ] +} diff --git a/_test/data/spec-10-14.data b/_test/data/spec-10-14.data new file mode 100644 index 0000000..7f5995c --- /dev/null +++ b/_test/data/spec-10-14.data @@ -0,0 +1,4 @@ +plain key: # empty value +"quoted key": +- one # explicit next-line +- two # block value diff --git a/_test/data/spec-10-15.canonical b/_test/data/spec-10-15.canonical new file mode 100644 index 0000000..85fbbd0 --- /dev/null +++ b/_test/data/spec-10-15.canonical @@ -0,0 +1,18 @@ +%YAML 1.1 +--- +!!seq [ + !!map { + ? !!str "sun" + : !!str "yellow" + }, + !!map { + ? !!map { + ? !!str "earth" + : !!str "blue" + } + : !!map { + ? !!str "moon" + : !!str "white" + } + } +] diff --git a/_test/data/spec-10-15.data b/_test/data/spec-10-15.data new file mode 100644 index 0000000..d675cfd --- /dev/null +++ b/_test/data/spec-10-15.data @@ -0,0 +1,3 @@ +- sun: yellow +- ? 
earth: blue + : moon: white diff --git a/_test/data/str.data b/_test/data/str.data new file mode 100644 index 0000000..7cbdb7c --- /dev/null +++ b/_test/data/str.data @@ -0,0 +1 @@ +- abcd diff --git a/_test/data/str.detect b/_test/data/str.detect new file mode 100644 index 0000000..7d5026f --- /dev/null +++ b/_test/data/str.detect @@ -0,0 +1 @@ +tag:yaml.org,2002:str diff --git a/_test/data/tags.events b/_test/data/tags.events new file mode 100644 index 0000000..bb93dce --- /dev/null +++ b/_test/data/tags.events @@ -0,0 +1,12 @@ +- !StreamStart +- !DocumentStart +- !SequenceStart +- !Scalar { value: 'data' } +#- !Scalar { tag: '!', value: 'data' } +- !Scalar { tag: 'tag:yaml.org,2002:str', value: 'data' } +- !Scalar { tag: '!myfunnytag', value: 'data' } +- !Scalar { tag: '!my!ugly!tag', value: 'data' } +- !Scalar { tag: 'tag:my.domain.org,2002:data!? #', value: 'data' } +- !SequenceEnd +- !DocumentEnd +- !StreamEnd diff --git a/_test/data/test_mark.marks b/_test/data/test_mark.marks new file mode 100644 index 0000000..7b08ee4 --- /dev/null +++ b/_test/data/test_mark.marks @@ -0,0 +1,38 @@ +--- +*The first line. +The last line. +--- +The first*line. +The last line. +--- +The first line.* +The last line. +--- +The first line. +*The last line. +--- +The first line. +The last*line. +--- +The first line. +The last line.* +--- +The first line. +*The selected line. +The last line. +--- +The first line. +The selected*line. +The last line. +--- +The first line. +The selected line.* +The last line. +--- +*The only line. +--- +The only*line. 
+--- +The only line.* +--- +Loooooooooooooooooooooooooooooooooooooooooooooong*Liiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiiine diff --git a/_test/data/timestamp-bugs.code b/_test/data/timestamp-bugs.code new file mode 100644 index 0000000..b1d6e9c --- /dev/null +++ b/_test/data/timestamp-bugs.code @@ -0,0 +1,8 @@ +[ + datetime.datetime(2001, 12, 15, 3, 29, 43, 100000), + datetime.datetime(2001, 12, 14, 16, 29, 43, 100000), + datetime.datetime(2001, 12, 14, 21, 59, 43, 1010), + datetime.datetime(2001, 12, 14, 21, 59, 43, 0, FixedOffset(60, "+1")), + datetime.datetime(2001, 12, 14, 21, 59, 43, 0, FixedOffset(-90, "-1:30")), + datetime.datetime(2005, 7, 8, 17, 35, 4, 517600), +] diff --git a/_test/data/timestamp-bugs.data b/_test/data/timestamp-bugs.data new file mode 100644 index 0000000..721d290 --- /dev/null +++ b/_test/data/timestamp-bugs.data @@ -0,0 +1,6 @@ +- 2001-12-14 21:59:43.10 -5:30 +- 2001-12-14 21:59:43.10 +5:30 +- 2001-12-14 21:59:43.00101 +- 2001-12-14 21:59:43+1 +- 2001-12-14 21:59:43-1:30 +- 2005-07-08 17:35:04.517600 diff --git a/_test/data/timestamp.data b/_test/data/timestamp.data new file mode 100644 index 0000000..7d214ce --- /dev/null +++ b/_test/data/timestamp.data @@ -0,0 +1,5 @@ +- 2001-12-15T02:59:43.1Z +- 2001-12-14t21:59:43.10-05:00 +- 2001-12-14 21:59:43.10 -5 +- 2001-12-15 2:59:43.10 +- 2002-12-14 diff --git a/_test/data/timestamp.detect b/_test/data/timestamp.detect new file mode 100644 index 0000000..2013936 --- /dev/null +++ b/_test/data/timestamp.detect @@ -0,0 +1 @@ +tag:yaml.org,2002:timestamp diff --git a/_test/data/unclosed-bracket.loader-error b/_test/data/unclosed-bracket.loader-error new file mode 100644 index 0000000..8c82077 --- /dev/null +++ b/_test/data/unclosed-bracket.loader-error @@ -0,0 +1,6 @@ +test: + - [ foo: bar +# comment the rest of the stream to let the scanner detect the problem. 
+# - baz +#"we could have detected the unclosed bracket on the above line, but this would forbid such syntax as": { +#} diff --git a/_test/data/unclosed-quoted-scalar.loader-error b/_test/data/unclosed-quoted-scalar.loader-error new file mode 100644 index 0000000..8537429 --- /dev/null +++ b/_test/data/unclosed-quoted-scalar.loader-error @@ -0,0 +1,2 @@ +'foo + bar diff --git a/_test/data/undefined-anchor.loader-error b/_test/data/undefined-anchor.loader-error new file mode 100644 index 0000000..9469103 --- /dev/null +++ b/_test/data/undefined-anchor.loader-error @@ -0,0 +1,3 @@ +- foo +- &bar baz +- *bat diff --git a/_test/data/undefined-constructor.loader-error b/_test/data/undefined-constructor.loader-error new file mode 100644 index 0000000..9a37ccc --- /dev/null +++ b/_test/data/undefined-constructor.loader-error @@ -0,0 +1 @@ +--- !foo bar diff --git a/_test/data/undefined-tag-handle.loader-error b/_test/data/undefined-tag-handle.loader-error new file mode 100644 index 0000000..82ba335 --- /dev/null +++ b/_test/data/undefined-tag-handle.loader-error @@ -0,0 +1 @@ +--- !foo!bar baz diff --git a/_test/data/unknown.dumper-error b/_test/data/unknown.dumper-error new file mode 100644 index 0000000..83204d2 --- /dev/null +++ b/_test/data/unknown.dumper-error @@ -0,0 +1 @@ +yaml.safe_dump(object) diff --git a/_test/data/unsupported-version.emitter-error b/_test/data/unsupported-version.emitter-error new file mode 100644 index 0000000..f9c6197 --- /dev/null +++ b/_test/data/unsupported-version.emitter-error @@ -0,0 +1,5 @@ +- !StreamStart +- !DocumentStart { version: [5,6] } +- !Scalar { value: foo } +- !DocumentEnd +- !StreamEnd diff --git a/_test/data/utf16be.code b/_test/data/utf16be.code new file mode 100644 index 0000000..c45b371 --- /dev/null +++ b/_test/data/utf16be.code @@ -0,0 +1 @@ +"UTF-16-BE" diff --git a/_test/data/utf16be.data b/_test/data/utf16be.data new file mode 100644 index 0000000..50dcfae Binary files /dev/null and b/_test/data/utf16be.data 
differ diff --git a/_test/data/utf16le.code b/_test/data/utf16le.code new file mode 100644 index 0000000..400530a --- /dev/null +++ b/_test/data/utf16le.code @@ -0,0 +1 @@ +"UTF-16-LE" diff --git a/_test/data/utf16le.data b/_test/data/utf16le.data new file mode 100644 index 0000000..76f5e73 Binary files /dev/null and b/_test/data/utf16le.data differ diff --git a/_test/data/utf8-implicit.code b/_test/data/utf8-implicit.code new file mode 100644 index 0000000..29326db --- /dev/null +++ b/_test/data/utf8-implicit.code @@ -0,0 +1 @@ +"implicit UTF-8" diff --git a/_test/data/utf8-implicit.data b/_test/data/utf8-implicit.data new file mode 100644 index 0000000..9d8081e --- /dev/null +++ b/_test/data/utf8-implicit.data @@ -0,0 +1 @@ +--- implicit UTF-8 diff --git a/_test/data/utf8.code b/_test/data/utf8.code new file mode 100644 index 0000000..dcf11cc --- /dev/null +++ b/_test/data/utf8.code @@ -0,0 +1 @@ +"UTF-8" diff --git a/_test/data/utf8.data b/_test/data/utf8.data new file mode 100644 index 0000000..686f48a --- /dev/null +++ b/_test/data/utf8.data @@ -0,0 +1 @@ +--- UTF-8 diff --git a/_test/data/util/00_ok.yaml b/_test/data/util/00_ok.yaml new file mode 100644 index 0000000..adc4adf --- /dev/null +++ b/_test/data/util/00_ok.yaml @@ -0,0 +1,3 @@ +- abc +- ghi # some comment +- klm diff --git a/_test/data/util/01_second_rt_ok.yaml b/_test/data/util/01_second_rt_ok.yaml new file mode 100644 index 0000000..de19513 --- /dev/null +++ b/_test/data/util/01_second_rt_ok.yaml @@ -0,0 +1,3 @@ +- abc +- ghi # some comment +- klm diff --git a/_test/data/util/02_not_ok.yaml b/_test/data/util/02_not_ok.yaml new file mode 100644 index 0000000..945e5ec --- /dev/null +++ b/_test/data/util/02_not_ok.yaml @@ -0,0 +1,2 @@ +123 # single scalar cannot have comment +... diff --git a/_test/data/util/03_no_comment_ok.yaml b/_test/data/util/03_no_comment_ok.yaml new file mode 100644 index 0000000..081284a --- /dev/null +++ b/_test/data/util/03_no_comment_ok.yaml @@ -0,0 +1,2 @@ +123 +... 
diff --git a/_test/data/valid_escape_characters.code b/_test/data/valid_escape_characters.code new file mode 100644 index 0000000..0434f0c --- /dev/null +++ b/_test/data/valid_escape_characters.code @@ -0,0 +1 @@ +"\" \\ / \b \f \n \r \t" diff --git a/_test/data/valid_escape_characters.data b/_test/data/valid_escape_characters.data new file mode 100644 index 0000000..a28e216 --- /dev/null +++ b/_test/data/valid_escape_characters.data @@ -0,0 +1 @@ +"\" \\ \/ \b \f \n \r \t" diff --git a/_test/data/valid_escape_characters.skip-ext b/_test/data/valid_escape_characters.skip-ext new file mode 100644 index 0000000..e69de29 diff --git a/_test/data/value.data b/_test/data/value.data new file mode 100644 index 0000000..c5b7680 --- /dev/null +++ b/_test/data/value.data @@ -0,0 +1 @@ +- = diff --git a/_test/data/value.detect b/_test/data/value.detect new file mode 100644 index 0000000..7c37d02 --- /dev/null +++ b/_test/data/value.detect @@ -0,0 +1 @@ +tag:yaml.org,2002:value diff --git a/_test/data/yaml.data b/_test/data/yaml.data new file mode 100644 index 0000000..a4bb3f8 --- /dev/null +++ b/_test/data/yaml.data @@ -0,0 +1,3 @@ +- !!yaml '!' 
+- !!yaml '&' +- !!yaml '*' diff --git a/_test/data/yaml.detect b/_test/data/yaml.detect new file mode 100644 index 0000000..e2cf189 --- /dev/null +++ b/_test/data/yaml.detect @@ -0,0 +1 @@ +tag:yaml.org,2002:yaml diff --git a/_test/lib/canonical.py b/_test/lib/canonical.py new file mode 100644 index 0000000..56fa297 --- /dev/null +++ b/_test/lib/canonical.py @@ -0,0 +1,387 @@ +import ruyaml +from ruyaml.composer import Composer +from ruyaml.constructor import Constructor +from ruyaml.resolver import Resolver + + +class CanonicalError(ruyaml.YAMLError): + pass + + +class CanonicalScanner: + def __init__(self, data): + try: + if isinstance(data, bytes): + data = data.decode('utf-8') + except UnicodeDecodeError: + raise CanonicalError('utf-8 stream is expected') + self.data = data + '\0' + self.index = 0 + self.tokens = [] + self.scanned = False + + def check_token(self, *choices): + if not self.scanned: + self.scan() + if self.tokens: + if not choices: + return True + for choice in choices: + if isinstance(self.tokens[0], choice): + return True + return False + + def peek_token(self): + if not self.scanned: + self.scan() + if self.tokens: + return self.tokens[0] + + def get_token(self, choice=None): + if not self.scanned: + self.scan() + token = self.tokens.pop(0) + if choice and not isinstance(token, choice): + raise CanonicalError('unexpected token ' + repr(token)) + return token + + def get_token_value(self): + token = self.get_token() + return token.value + + def scan(self): + self.tokens.append(ruyaml.StreamStartToken(None, None)) + while True: + self.find_token() + ch = self.data[self.index] + if ch == '\0': + self.tokens.append(ruyaml.StreamEndToken(None, None)) + break + elif ch == '%': + self.tokens.append(self.scan_directive()) + elif ch == '-' and self.data[self.index : self.index + 3] == '---': + self.index += 3 + self.tokens.append(ruyaml.DocumentStartToken(None, None)) + elif ch == '[': + self.index += 1 + 
self.tokens.append(ruyaml.FlowSequenceStartToken(None, None)) + elif ch == '{': + self.index += 1 + self.tokens.append(ruyaml.FlowMappingStartToken(None, None)) + elif ch == ']': + self.index += 1 + self.tokens.append(ruyaml.FlowSequenceEndToken(None, None)) + elif ch == '}': + self.index += 1 + self.tokens.append(ruyaml.FlowMappingEndToken(None, None)) + elif ch == '?': + self.index += 1 + self.tokens.append(ruyaml.KeyToken(None, None)) + elif ch == ':': + self.index += 1 + self.tokens.append(ruyaml.ValueToken(None, None)) + elif ch == ',': + self.index += 1 + self.tokens.append(ruyaml.FlowEntryToken(None, None)) + elif ch == '*' or ch == '&': + self.tokens.append(self.scan_alias()) + elif ch == '!': + self.tokens.append(self.scan_tag()) + elif ch == '"': + self.tokens.append(self.scan_scalar()) + else: + raise CanonicalError('invalid token') + self.scanned = True + + DIRECTIVE = '%YAML 1.1' + + def scan_directive(self): + if ( + self.data[self.index : self.index + len(self.DIRECTIVE)] == self.DIRECTIVE + and self.data[self.index + len(self.DIRECTIVE)] in ' \n\0' + ): + self.index += len(self.DIRECTIVE) + return ruyaml.DirectiveToken('YAML', (1, 1), None, None) + else: + raise CanonicalError('invalid directive') + + def scan_alias(self): + if self.data[self.index] == '*': + TokenClass = ruyaml.AliasToken + else: + TokenClass = ruyaml.AnchorToken + self.index += 1 + start = self.index + while self.data[self.index] not in ', \n\0': + self.index += 1 + value = self.data[start : self.index] + return TokenClass(value, None, None) + + def scan_tag(self): + self.index += 1 + start = self.index + while self.data[self.index] not in ' \n\0': + self.index += 1 + value = self.data[start : self.index] + if not value: + value = '!' + elif value[0] == '!': + value = 'tag:yaml.org,2002:' + value[1:] + elif value[0] == '<' and value[-1] == '>': + value = value[1:-1] + else: + value = '!' 
+ value + return ruyaml.TagToken(value, None, None) + + QUOTE_CODES = {'x': 2, 'u': 4, 'U': 8} + + QUOTE_REPLACES = { + '\\': '\\', + '"': '"', + ' ': ' ', + 'a': '\x07', + 'b': '\x08', + 'e': '\x1B', + 'f': '\x0C', + 'n': '\x0A', + 'r': '\x0D', + 't': '\x09', + 'v': '\x0B', + 'N': '\u0085', + 'L': '\u2028', + 'P': '\u2029', + '_': '_', + '0': '\x00', + } + + def scan_scalar(self): + self.index += 1 + chunks = [] + start = self.index + ignore_spaces = False + while self.data[self.index] != '"': + if self.data[self.index] == '\\': + ignore_spaces = False + chunks.append(self.data[start : self.index]) + self.index += 1 + ch = self.data[self.index] + self.index += 1 + if ch == '\n': + ignore_spaces = True + elif ch in self.QUOTE_CODES: + length = self.QUOTE_CODES[ch] + code = int(self.data[self.index : self.index + length], 16) + chunks.append(chr(code)) + self.index += length + else: + if ch not in self.QUOTE_REPLACES: + raise CanonicalError('invalid escape code') + chunks.append(self.QUOTE_REPLACES[ch]) + start = self.index + elif self.data[self.index] == '\n': + chunks.append(self.data[start : self.index]) + chunks.append(' ') + self.index += 1 + start = self.index + ignore_spaces = True + elif ignore_spaces and self.data[self.index] == ' ': + self.index += 1 + start = self.index + else: + ignore_spaces = False + self.index += 1 + chunks.append(self.data[start : self.index]) + self.index += 1 + return ruyaml.ScalarToken("".join(chunks), False, None, None) + + def find_token(self): + found = False + while not found: + while self.data[self.index] in ' \t': + self.index += 1 + if self.data[self.index] == '#': + while self.data[self.index] != '\n': + self.index += 1 + if self.data[self.index] == '\n': + self.index += 1 + else: + found = True + + +class CanonicalParser: + def __init__(self): + self.events = [] + self.parsed = False + + def dispose(self): + pass + + # stream: STREAM-START document* STREAM-END + def parse_stream(self): + 
self.get_token(ruyaml.StreamStartToken) + self.events.append(ruyaml.StreamStartEvent(None, None)) + while not self.check_token(ruyaml.StreamEndToken): + if self.check_token(ruyaml.DirectiveToken, ruyaml.DocumentStartToken): + self.parse_document() + else: + raise CanonicalError( + 'document is expected, got ' + repr(self.tokens[0]) + ) + self.get_token(ruyaml.StreamEndToken) + self.events.append(ruyaml.StreamEndEvent(None, None)) + + # document: DIRECTIVE? DOCUMENT-START node + def parse_document(self): + # node = None + if self.check_token(ruyaml.DirectiveToken): + self.get_token(ruyaml.DirectiveToken) + self.get_token(ruyaml.DocumentStartToken) + self.events.append(ruyaml.DocumentStartEvent(None, None)) + self.parse_node() + self.events.append(ruyaml.DocumentEndEvent(None, None)) + + # node: ALIAS | ANCHOR? TAG? (SCALAR|sequence|mapping) + def parse_node(self): + if self.check_token(ruyaml.AliasToken): + self.events.append(ruyaml.AliasEvent(self.get_token_value(), None, None)) + else: + anchor = None + if self.check_token(ruyaml.AnchorToken): + anchor = self.get_token_value() + tag = None + if self.check_token(ruyaml.TagToken): + tag = self.get_token_value() + if self.check_token(ruyaml.ScalarToken): + self.events.append( + ruyaml.ScalarEvent( + anchor, tag, (False, False), self.get_token_value(), None, None + ) + ) + elif self.check_token(ruyaml.FlowSequenceStartToken): + self.events.append(ruyaml.SequenceStartEvent(anchor, tag, None, None)) + self.parse_sequence() + elif self.check_token(ruyaml.FlowMappingStartToken): + self.events.append(ruyaml.MappingStartEvent(anchor, tag, None, None)) + self.parse_mapping() + else: + raise CanonicalError( + "SCALAR, '[', or '{' is expected, got " + repr(self.tokens[0]) + ) + + # sequence: SEQUENCE-START (node (ENTRY node)*)? ENTRY? 
SEQUENCE-END + def parse_sequence(self): + self.get_token(ruyaml.FlowSequenceStartToken) + if not self.check_token(ruyaml.FlowSequenceEndToken): + self.parse_node() + while not self.check_token(ruyaml.FlowSequenceEndToken): + self.get_token(ruyaml.FlowEntryToken) + if not self.check_token(ruyaml.FlowSequenceEndToken): + self.parse_node() + self.get_token(ruyaml.FlowSequenceEndToken) + self.events.append(ruyaml.SequenceEndEvent(None, None)) + + # mapping: MAPPING-START (map_entry (ENTRY map_entry)*)? ENTRY? MAPPING-END + def parse_mapping(self): + self.get_token(ruyaml.FlowMappingStartToken) + if not self.check_token(ruyaml.FlowMappingEndToken): + self.parse_map_entry() + while not self.check_token(ruyaml.FlowMappingEndToken): + self.get_token(ruyaml.FlowEntryToken) + if not self.check_token(ruyaml.FlowMappingEndToken): + self.parse_map_entry() + self.get_token(ruyaml.FlowMappingEndToken) + self.events.append(ruyaml.MappingEndEvent(None, None)) + + # map_entry: KEY node VALUE node + def parse_map_entry(self): + self.get_token(ruyaml.KeyToken) + self.parse_node() + self.get_token(ruyaml.ValueToken) + self.parse_node() + + def parse(self): + self.parse_stream() + self.parsed = True + + def get_event(self): + if not self.parsed: + self.parse() + return self.events.pop(0) + + def check_event(self, *choices): + if not self.parsed: + self.parse() + if self.events: + if not choices: + return True + for choice in choices: + if isinstance(self.events[0], choice): + return True + return False + + def peek_event(self): + if not self.parsed: + self.parse() + return self.events[0] + + +class CanonicalLoader( + CanonicalScanner, CanonicalParser, Composer, Constructor, Resolver +): + def __init__(self, stream): + if hasattr(stream, 'read'): + stream = stream.read() + CanonicalScanner.__init__(self, stream) + CanonicalParser.__init__(self) + Composer.__init__(self) + Constructor.__init__(self) + Resolver.__init__(self) + + +ruyaml.CanonicalLoader = CanonicalLoader + + +def 
canonical_scan(stream): + yaml = ruyaml.YAML() + yaml.scanner = CanonicalScanner + return yaml.scan(stream) + + +ruyaml.canonical_scan = canonical_scan + + +def canonical_parse(stream): + return ruyaml.parse(stream, Loader=CanonicalLoader) + + +ruyaml.canonical_parse = canonical_parse + + +def canonical_compose(stream): + return ruyaml.compose(stream, Loader=CanonicalLoader) + + +ruyaml.canonical_compose = canonical_compose + + +def canonical_compose_all(stream): + return ruyaml.compose_all(stream, Loader=CanonicalLoader) + + +ruyaml.canonical_compose_all = canonical_compose_all + + +def canonical_load(stream): + return ruyaml.load(stream, Loader=CanonicalLoader) + + +ruyaml.canonical_load = canonical_load + + +def canonical_load_all(stream): + yaml = ruyaml.YAML(typ='safe', pure=True) + yaml.Loader = CanonicalLoader + return yaml.load_all(stream) + + +ruyaml.canonical_load_all = canonical_load_all diff --git a/_test/lib/test_all.py b/_test/lib/test_all.py new file mode 100644 index 0000000..5c2fa95 --- /dev/null +++ b/_test/lib/test_all.py @@ -0,0 +1,21 @@ +import sys # NOQA + +import test_appliance + +import ruyaml + + +def main(args=None): + collections = [] + import test_yaml + + collections.append(test_yaml) + if ruyaml.__with_libyaml__: + import test_yaml_ext + + collections.append(test_yaml_ext) + test_appliance.run(collections, args) + + +if __name__ == '__main__': + main() diff --git a/_test/lib/test_appliance.py b/_test/lib/test_appliance.py new file mode 100644 index 0000000..a95de5e --- /dev/null +++ b/_test/lib/test_appliance.py @@ -0,0 +1,205 @@ +import argparse +import os +import pprint +import sys +import traceback +import types + +# DATA = 'tests/data' +# determine the position of data dynamically relative to program +# this allows running test while the current path is not the top of the +# repository, e.g. 
from the tests/data directory: python ../test_yaml.py +DATA = __file__.rsplit(os.sep, 2)[0] + '/data' + + +def find_test_functions(collections): + if not isinstance(collections, list): + collections = [collections] + functions = [] + for collection in collections: + if not isinstance(collection, dict): + collection = vars(collection) + for key in sorted(collection): + value = collection[key] + if isinstance(value, types.FunctionType) and hasattr(value, 'unittest'): + functions.append(value) + return functions + + +def find_test_filenames(directory): + filenames = {} + for filename in os.listdir(directory): + if os.path.isfile(os.path.join(directory, filename)): + base, ext = os.path.splitext(filename) + filenames.setdefault(base, []).append(ext) + filenames = sorted(filenames.items()) + return filenames + + +def parse_arguments(args): + """""" + parser = argparse.ArgumentParser( + usage=""" run the yaml tests. By default + all functions on all appropriate test_files are run. Functions have + unittest attributes that determine the required extensions to filenames + that need to be available in order to run that test. 
E.g.\n\n + python test_yaml.py test_constructor_types\n + python test_yaml.py --verbose test_tokens spec-02-05\n\n + The presence of an extension in the .skip attribute of a function + disables the test for that function.""" + ) + # ToDo: make into int and test > 0 in functions + parser.add_argument( + '--verbose', + '-v', + action='store_true', + default='YAML_TEST_VERBOSE' in os.environ, + help='set verbosity output', + ) + parser.add_argument( + '--list-functions', + action='store_true', + help="""list all functions with required file extensions for test files + """, + ) + parser.add_argument('function', nargs='?', help="""restrict function to run""") + parser.add_argument( + 'filenames', + nargs='*', + help="""basename of filename set, extensions (.code, .data) have to + be a superset of those in the unittest attribute of the selected + function""", + ) + args = parser.parse_args(args) + # print('args', args) + verbose = args.verbose + include_functions = [args.function] if args.function else [] + include_filenames = args.filenames + # if args is None: + # args = sys.argv[1:] + # verbose = False + # if '-v' in args: + # verbose = True + # args.remove('-v') + # if '--verbose' in args: + # verbose = True + # args.remove('--verbose') # never worked without this + # if 'YAML_TEST_VERBOSE' in os.environ: + # verbose = True + # include_functions = [] + # if args: + # include_functions.append(args.pop(0)) + if 'YAML_TEST_FUNCTIONS' in os.environ: + include_functions.extend(os.environ['YAML_TEST_FUNCTIONS'].split()) + # include_filenames = [] + # include_filenames.extend(args) + if 'YAML_TEST_FILENAMES' in os.environ: + include_filenames.extend(os.environ['YAML_TEST_FILENAMES'].split()) + return include_functions, include_filenames, verbose, args + + +def execute(function, filenames, verbose): + name = function.__name__ + if verbose: + sys.stdout.write('=' * 75 + '\n') + sys.stdout.write('%s(%s)...\n' % (name, ', '.join(filenames))) + try: + function(verbose=verbose, 
*filenames) + except Exception as exc: + info = sys.exc_info() + if isinstance(exc, AssertionError): + kind = 'FAILURE' + else: + kind = 'ERROR' + if verbose: + traceback.print_exc(limit=1, file=sys.stdout) + else: + sys.stdout.write(kind[0]) + sys.stdout.flush() + else: + kind = 'SUCCESS' + info = None + if not verbose: + sys.stdout.write('.') + sys.stdout.flush() + return (name, filenames, kind, info) + + +def display(results, verbose): + if results and not verbose: + sys.stdout.write('\n') + total = len(results) + failures = 0 + errors = 0 + for name, filenames, kind, info in results: + if kind == 'SUCCESS': + continue + if kind == 'FAILURE': + failures += 1 + if kind == 'ERROR': + errors += 1 + sys.stdout.write('=' * 75 + '\n') + sys.stdout.write('%s(%s): %s\n' % (name, ', '.join(filenames), kind)) + if kind == 'ERROR': + traceback.print_exception(file=sys.stdout, *info) + else: + sys.stdout.write('Traceback (most recent call last):\n') + traceback.print_tb(info[2], file=sys.stdout) + sys.stdout.write('%s: see below\n' % info[0].__name__) + sys.stdout.write('~' * 75 + '\n') + for arg in info[1].args: + pprint.pprint(arg, stream=sys.stdout) + for filename in filenames: + sys.stdout.write('-' * 75 + '\n') + sys.stdout.write('%s:\n' % filename) + with open(filename, 'r', errors='replace') as fp: + data = fp.read() + sys.stdout.write(data) + if data and data[-1] != '\n': + sys.stdout.write('\n') + sys.stdout.write('=' * 75 + '\n') + sys.stdout.write('TESTS: %s\n' % total) + ret_val = 0 + if failures: + sys.stdout.write('FAILURES: %s\n' % failures) + ret_val = 1 + if errors: + sys.stdout.write('ERRORS: %s\n' % errors) + ret_val = 2 + return ret_val + + +def run(collections, args=None): + test_functions = find_test_functions(collections) + test_filenames = find_test_filenames(DATA) + include_functions, include_filenames, verbose, a = parse_arguments(args) + if a.list_functions: + print('test functions:') + for f in test_functions: + print(' {:30s} 
{}'.format(f.__name__, f.unittest)) + return + results = [] + for function in test_functions: + if include_functions and function.__name__ not in include_functions: + continue + if function.unittest: + for base, exts in test_filenames: + if include_filenames and base not in include_filenames: + continue + filenames = [] + for ext in function.unittest: + if ext not in exts: + break + filenames.append(os.path.join(DATA, base + ext)) + else: + skip_exts = getattr(function, 'skip', []) + for skip_ext in skip_exts: + if skip_ext in exts: + break + else: + result = execute(function, filenames, verbose) + results.append(result) + else: + result = execute(function, [], verbose) + results.append(result) + return display(results, verbose=verbose) diff --git a/_test/lib/test_build.py b/_test/lib/test_build.py new file mode 100644 index 0000000..9fbab43 --- /dev/null +++ b/_test/lib/test_build.py @@ -0,0 +1,15 @@ +if __name__ == '__main__': + import distutils.util + import os + import sys + + build_lib = 'build/lib' + build_lib_ext = os.path.join( + 'build', 'lib.%s-%s' % (distutils.util.get_platform(), sys.version[0:3]) + ) + sys.path.insert(0, build_lib) + sys.path.insert(0, build_lib_ext) + import test_appliance + import test_yaml + + test_appliance.run(test_yaml) diff --git a/_test/lib/test_build_ext.py b/_test/lib/test_build_ext.py new file mode 100644 index 0000000..3a2bc0f --- /dev/null +++ b/_test/lib/test_build_ext.py @@ -0,0 +1,15 @@ +if __name__ == '__main__': + import distutils.util + import os + import sys + + build_lib = 'build/lib' + build_lib_ext = os.path.join( + 'build', 'lib.%s-%s' % (distutils.util.get_platform(), sys.version[0:3]) + ) + sys.path.insert(0, build_lib) + sys.path.insert(0, build_lib_ext) + import test_appliance + import test_yaml_ext + + test_appliance.run(test_yaml_ext) diff --git a/_test/lib/test_canonical.py b/_test/lib/test_canonical.py new file mode 100644 index 0000000..fe27ec6 --- /dev/null +++ b/_test/lib/test_canonical.py @@ -0,0 
+1,55 @@ +# Skipped because we have no idea where this "canonical" module +# comes from, nor where all those fixtures originate +import pytest + +pytestmark = pytest.mark.skip +# import canonical # NOQA + +import ruyaml + + +def test_canonical_scanner(canonical_filename, verbose=False): + with open(canonical_filename, 'rb') as fp0: + data = fp0.read() + tokens = list(ruyaml.canonical_scan(data)) + assert tokens, tokens + if verbose: + for token in tokens: + print(token) + + +test_canonical_scanner.unittest = ['.canonical'] + + +def test_canonical_parser(canonical_filename, verbose=False): + with open(canonical_filename, 'rb') as fp0: + data = fp0.read() + events = list(ruyaml.canonical_parse(data)) + assert events, events + if verbose: + for event in events: + print(event) + + +test_canonical_parser.unittest = ['.canonical'] + + +def test_canonical_error(data_filename, canonical_filename, verbose=False): + with open(data_filename, 'rb') as fp0: + data = fp0.read() + try: + output = list(ruyaml.canonical_load_all(data)) # NOQA + except ruyaml.YAMLError as exc: + if verbose: + print(exc) + else: + raise AssertionError('expected an exception') + + +test_canonical_error.unittest = ['.data', '.canonical'] +test_canonical_error.skip = ['.empty'] + +if __name__ == '__main__': + import test_appliance + + test_appliance.run(globals()) diff --git a/_test/lib/test_constructor.py b/_test/lib/test_constructor.py new file mode 100644 index 0000000..681b34d --- /dev/null +++ b/_test/lib/test_constructor.py @@ -0,0 +1,393 @@ +# Skipped because we have no idea where all those fixtures originate +import pytest + +pytestmark = pytest.mark.skip + +import ruyaml as yaml + +YAML = yaml.YAML + +import datetime +import pprint + +import ruyaml + +try: + set +except NameError: + from sets import Set as set # NOQA + +import ruyaml.tokens + + +def cmp(a, b): + return (a > b) - (a < b) + + +def execute(code): + global value + exec(code) + return value + + +def _make_objects(): + global 
MyLoader, MyDumper, MyTestClass1, MyTestClass2, MyTestClass3 + global YAMLobject1, YAMLobject2, AnObject, AnInstance, AState, ACustomState + global InitArgs, InitArgsWithState + global NewArgs, NewArgsWithState, Reduce, ReduceWithState, MyInt, MyList, MyDict + global FixedOffset, today, execute + + class MyLoader(ruyaml.Loader): + pass + + class MyDumper(ruyaml.Dumper): + pass + + class MyTestClass1: + def __init__(self, x, y=0, z=0): + self.x = x + self.y = y + self.z = z + + def __eq__(self, other): + if isinstance(other, MyTestClass1): + return self.__class__, self.__dict__ == other.__class__, other.__dict__ + else: + return False + + def construct1(constructor, node): + mapping = constructor.construct_mapping(node) + return MyTestClass1(**mapping) + + def represent1(representer, native): + return representer.represent_mapping('!tag1', native.__dict__) + + ruyaml.add_constructor('!tag1', construct1, Loader=MyLoader) + ruyaml.add_representer(MyTestClass1, represent1, Dumper=MyDumper) + + class MyTestClass2(MyTestClass1, ruyaml.YAMLObject): + ruyaml.loader = MyLoader + ruyaml.dumper = MyDumper + ruyaml.tag = '!tag2' + + def from_yaml(cls, constructor, node): + x = constructor.construct_yaml_int(node) + return cls(x=x) + + from_yaml = classmethod(from_yaml) + + def to_yaml(cls, representer, native): + return representer.represent_scalar(cls.yaml_tag, str(native.x)) + + to_yaml = classmethod(to_yaml) + + class MyTestClass3(MyTestClass2): + ruyaml.tag = '!tag3' + + def from_yaml(cls, constructor, node): + mapping = constructor.construct_mapping(node) + if '=' in mapping: + x = mapping['='] + del mapping['='] + mapping['x'] = x + return cls(**mapping) + + from_yaml = classmethod(from_yaml) + + def to_yaml(cls, representer, native): + return representer.represent_mapping(cls.yaml_tag, native.__dict__) + + to_yaml = classmethod(to_yaml) + + class YAMLobject1(ruyaml.YAMLObject): + ruyaml.loader = MyLoader + ruyaml.dumper = MyDumper + ruyaml.tag = '!foo' + + def 
__init__(self, my_parameter=None, my_another_parameter=None): + self.my_parameter = my_parameter + self.my_another_parameter = my_another_parameter + + def __eq__(self, other): + if isinstance(other, YAMLobject1): + return self.__class__, self.__dict__ == other.__class__, other.__dict__ + else: + return False + + class YAMLobject2(ruyaml.YAMLObject): + ruyaml.loader = MyLoader + ruyaml.dumper = MyDumper + ruyaml.tag = '!bar' + + def __init__(self, foo=1, bar=2, baz=3): + self.foo = foo + self.bar = bar + self.baz = baz + + def __getstate__(self): + return {1: self.foo, 2: self.bar, 3: self.baz} + + def __setstate__(self, state): + self.foo = state[1] + self.bar = state[2] + self.baz = state[3] + + def __eq__(self, other): + if isinstance(other, YAMLobject2): + return self.__class__, self.__dict__ == other.__class__, other.__dict__ + else: + return False + + class AnObject: + def __new__(cls, foo=None, bar=None, baz=None): + self = object.__new__(cls) + self.foo = foo + self.bar = bar + self.baz = baz + return self + + def __cmp__(self, other): + return cmp( + (type(self), self.foo, self.bar, self.baz), # NOQA + (type(other), other.foo, other.bar, other.baz), + ) + + def __eq__(self, other): + return type(self) is type(other) and (self.foo, self.bar, self.baz) == ( + other.foo, + other.bar, + other.baz, + ) + + class AnInstance: + def __init__(self, foo=None, bar=None, baz=None): + self.foo = foo + self.bar = bar + self.baz = baz + + def __cmp__(self, other): + return cmp( + (type(self), self.foo, self.bar, self.baz), # NOQA + (type(other), other.foo, other.bar, other.baz), + ) + + def __eq__(self, other): + return type(self) is type(other) and (self.foo, self.bar, self.baz) == ( + other.foo, + other.bar, + other.baz, + ) + + class AState(AnInstance): + def __getstate__(self): + return {'_foo': self.foo, '_bar': self.bar, '_baz': self.baz} + + def __setstate__(self, state): + self.foo = state['_foo'] + self.bar = state['_bar'] + self.baz = state['_baz'] + + class 
ACustomState(AnInstance): + def __getstate__(self): + return (self.foo, self.bar, self.baz) + + def __setstate__(self, state): + self.foo, self.bar, self.baz = state + + # class InitArgs(AnInstance): + # def __getinitargs__(self): + # return (self.foo, self.bar, self.baz) + # def __getstate__(self): + # return {} + + # class InitArgsWithState(AnInstance): + # def __getinitargs__(self): + # return (self.foo, self.bar) + # def __getstate__(self): + # return self.baz + # def __setstate__(self, state): + # self.baz = state + + class NewArgs(AnObject): + def __getnewargs__(self): + return (self.foo, self.bar, self.baz) + + def __getstate__(self): + return {} + + class NewArgsWithState(AnObject): + def __getnewargs__(self): + return (self.foo, self.bar) + + def __getstate__(self): + return self.baz + + def __setstate__(self, state): + self.baz = state + + InitArgs = NewArgs + + InitArgsWithState = NewArgsWithState + + class Reduce(AnObject): + def __reduce__(self): + return self.__class__, (self.foo, self.bar, self.baz) + + class ReduceWithState(AnObject): + def __reduce__(self): + return self.__class__, (self.foo, self.bar), self.baz + + def __setstate__(self, state): + self.baz = state + + class MyInt(int): + def __eq__(self, other): + return type(self) is type(other) and int(self) == int(other) + + class MyList(list): + def __init__(self, n=1): + self.extend([None] * n) + + def __eq__(self, other): + return type(self) is type(other) and list(self) == list(other) + + class MyDict(dict): + def __init__(self, n=1): + for k in range(n): + self[k] = None + + def __eq__(self, other): + return type(self) is type(other) and dict(self) == dict(other) + + class FixedOffset(datetime.tzinfo): + def __init__(self, offset, name): + self.__offset = datetime.timedelta(minutes=offset) + self.__name = name + + def utcoffset(self, dt): + return self.__offset + + def tzname(self, dt): + return self.__name + + def dst(self, dt): + return datetime.timedelta(0) + + today = 
datetime.date.today() + + +try: + from ruamel.ordereddict import ordereddict +except ImportError: + from collections import OrderedDict + + # to get the right name import ... as ordereddict doesn't do that + + class ordereddict(OrderedDict): + pass + + +def _load_code(expression): + return eval(expression, globals()) + + +def _serialize_value(data): + if isinstance(data, list): + return '[%s]' % ', '.join(map(_serialize_value, data)) + elif isinstance(data, dict): + items = [] + for key, value in data.items(): + key = _serialize_value(key) + value = _serialize_value(value) + items.append('%s: %s' % (key, value)) + items.sort() + return '{%s}' % ', '.join(items) + elif isinstance(data, datetime.datetime): + return repr(data.utctimetuple()) + elif isinstance(data, float) and data != data: + return '?' + else: + return str(data) + + +def test_constructor_types(data_filename, code_filename, verbose=False): + _make_objects() + native1 = None + native2 = None + yaml = ruyaml.YAML(typ='safe', pure=True) + yaml.loader = MyLoader + try: + with open(data_filename, 'rb') as fp0: + native1 = list(ruyaml.load_all(fp0)) + if len(native1) == 1: + native1 = native1[0] + with open(code_filename, 'rb') as fp0: + native2 = _load_code(fp0.read()) + try: + if native1 == native2: + return + except TypeError: + pass + # print('native1', native1) + if verbose: + print('SERIALIZED NATIVE1:') + print(_serialize_value(native1)) + print('SERIALIZED NATIVE2:') + print(_serialize_value(native2)) + assert _serialize_value(native1) == _serialize_value(native2), ( + native1, + native2, + ) + finally: + if verbose: + print('NATIVE1:') + pprint.pprint(native1) + print('NATIVE2:') + pprint.pprint(native2) + + +test_constructor_types.unittest = ['.data', '.code'] + + +def test_roundtrip_data(code_filename, roundtrip_filename, verbose=False): + _make_objects() + with open(code_filename, 'rb') as fp0: + value1 = fp0.read() + yaml = YAML(typ='safe', pure=True) + yaml.Loader = MyLoader + native2 = 
list(yaml.load_all(value1)) + if len(native2) == 1: + native2 = native2[0] + try: + value2 = ruyaml.dump( + native2, + Dumper=MyDumper, + default_flow_style=False, + allow_unicode=True, + encoding='utf-8', + ) + # value2 += x + if verbose: + print('SERIALIZED NATIVE1:') + print(value1) + print('SERIALIZED NATIVE2:') + print(value2) + assert value1 == value2, (value1, value2) + finally: + if verbose: + print('NATIVE2:') + pprint.pprint(native2) + + +test_roundtrip_data.unittest = ['.data', '.roundtrip'] + + +if __name__ == '__main__': + import sys + + import test_constructor # NOQA + + sys.modules['test_constructor'] = sys.modules['__main__'] + import test_appliance + + test_appliance.run(globals()) diff --git a/_test/lib/test_emitter.py b/_test/lib/test_emitter.py new file mode 100644 index 0000000..0327c1b --- /dev/null +++ b/_test/lib/test_emitter.py @@ -0,0 +1,145 @@ +from __future__ import absolute_import, print_function + +# Skipped because we have no idea where all those fixtures originate +import pytest + +import ruyaml as yaml + +pytestmark = pytest.mark.skip + + +def _compare_events(events1, events2): + assert len(events1) == len(events2), (events1, events2) + for event1, event2 in zip(events1, events2): + assert event1.__class__ == event2.__class__, (event1, event2) + if isinstance(event1, yaml.NodeEvent): + assert event1.anchor == event2.anchor, (event1, event2) + if isinstance(event1, yaml.CollectionStartEvent): + assert event1.tag == event2.tag, (event1, event2) + if isinstance(event1, yaml.ScalarEvent): + if True not in event1.implicit + event2.implicit: + assert event1.tag == event2.tag, (event1, event2) + assert event1.value == event2.value, (event1, event2) + + +def test_emitter_on_data(data_filename, canonical_filename, verbose=False): + with open(data_filename, 'rb') as fp0: + events = list(yaml.parse(fp0)) + output = yaml.emit(events) + if verbose: + print('OUTPUT:') + print(output) + new_events = list(yaml.parse(output)) + 
_compare_events(events, new_events) + + +test_emitter_on_data.unittest = ['.data', '.canonical'] + + +def test_emitter_on_canonical(canonical_filename, verbose=False): + with open(canonical_filename, 'rb') as fp0: + events = list(yaml.parse(fp0)) + for canonical in [False, True]: + output = yaml.emit(events, canonical=canonical) + if verbose: + print('OUTPUT (canonical=%s):' % canonical) + print(output) + new_events = list(yaml.parse(output)) + _compare_events(events, new_events) + + +test_emitter_on_canonical.unittest = ['.canonical'] + + +def test_emitter_styles(data_filename, canonical_filename, verbose=False): + for filename in [data_filename, canonical_filename]: + with open(filename, 'rb') as fp0: + events = list(yaml.parse(fp0)) + for flow_style in [False, True]: + for style in ['|', '>', '"', "'", ""]: + styled_events = [] + for event in events: + if isinstance(event, yaml.ScalarEvent): + event = yaml.ScalarEvent( + event.anchor, + event.tag, + event.implicit, + event.value, + style=style, + ) + elif isinstance(event, yaml.SequenceStartEvent): + event = yaml.SequenceStartEvent( + event.anchor, + event.tag, + event.implicit, + flow_style=flow_style, + ) + elif isinstance(event, yaml.MappingStartEvent): + event = yaml.MappingStartEvent( + event.anchor, + event.tag, + event.implicit, + flow_style=flow_style, + ) + styled_events.append(event) + output = yaml.emit(styled_events) + if verbose: + print( + 'OUTPUT (filename=%r, flow_style=%r, style=%r)' + % (filename, flow_style, style) + ) + print(output) + new_events = list(yaml.parse(output)) + _compare_events(events, new_events) + + +test_emitter_styles.unittest = ['.data', '.canonical'] + + +class EventsLoader(yaml.Loader): + def construct_event(self, node): + if isinstance(node, yaml.ScalarNode): + mapping = {} + else: + mapping = self.construct_mapping(node) + class_name = str(node.tag[1:]) + 'Event' + if class_name in [ + 'AliasEvent', + 'ScalarEvent', + 'SequenceStartEvent', + 'MappingStartEvent', + ]: + 
mapping.setdefault('anchor', None) + if class_name in ['ScalarEvent', 'SequenceStartEvent', 'MappingStartEvent']: + mapping.setdefault('tag', None) + if class_name in ['SequenceStartEvent', 'MappingStartEvent']: + mapping.setdefault('implicit', True) + if class_name == 'ScalarEvent': + mapping.setdefault('implicit', (False, True)) + mapping.setdefault('value', "") + value = getattr(yaml, class_name)(**mapping) + return value + + +# if Loader is not a composite, add this function +# EventsLoader.add_constructor = yaml.constructor.Constructor.add_constructor + + +EventsLoader.add_constructor(None, EventsLoader.construct_event) + + +def test_emitter_events(events_filename, verbose=False): + with open(events_filename, 'rb') as fp0: + events = list(yaml.load(fp0, Loader=EventsLoader)) + output = yaml.emit(events) + if verbose: + print('OUTPUT:') + print(output) + new_events = list(yaml.parse(output)) + _compare_events(events, new_events) + + +if __name__ == '__main__': + import test_appliance + + test_appliance.run(globals()) diff --git a/_test/lib/test_errors.py b/_test/lib/test_errors.py new file mode 100644 index 0000000..3fb3e1d --- /dev/null +++ b/_test/lib/test_errors.py @@ -0,0 +1,100 @@ +import ruyaml as yaml + +YAML = yaml.YAML + +import warnings + +# Skipped because we have no idea where the "error_filename" +# fixture is supposed to come from +import pytest +import test_emitter + +import ruyaml as yaml + +pytestmark = pytest.mark.skip + +warnings.simplefilter('ignore', yaml.error.UnsafeLoaderWarning) + + +def test_loader_error(error_filename, verbose=False): + yaml = YAML(typ='safe', pure=True) + try: + with open(error_filename, 'rb') as fp0: + list(yaml.load_all(fp0)) + except yaml.YAMLError as exc: + if verbose: + print('%s:' % exc.__class__.__name__, exc) + else: + raise AssertionError('expected an exception') + + +test_loader_error.unittest = ['.loader-error'] + + +def test_loader_error_string(error_filename, verbose=False): + yaml = YAML(typ='safe', 
pure=True) + try: + with open(error_filename, 'rb') as fp0: + list(yaml.load_all(fp0.read())) + except yaml.YAMLError as exc: + if verbose: + print('%s:' % exc.__class__.__name__, exc) + else: + raise AssertionError('expected an exception') + + +test_loader_error_string.unittest = ['.loader-error'] + + +def test_loader_error_single(error_filename, verbose=False): + yaml = YAML(typ='safe', pure=True) + try: + with open(error_filename, 'rb') as fp0: + yaml.load(fp0.read()) + except yaml.YAMLError as exc: + if verbose: + print('%s:' % exc.__class__.__name__, exc) + else: + raise AssertionError('expected an exception') + + +test_loader_error_single.unittest = ['.single-loader-error'] + + +def test_emitter_error(error_filename, verbose=False): + yaml = YAML(typ='safe', pure=True) + with open(error_filename, 'rb') as fp0: + events = list(yaml.load(fp0, Loader=test_emitter.EventsLoader)) + try: + yaml.emit(events) + except yaml.YAMLError as exc: + if verbose: + print('%s:' % exc.__class__.__name__, exc) + else: + raise AssertionError('expected an exception') + + +test_emitter_error.unittest = ['.emitter-error'] + + +def test_dumper_error(error_filename, verbose=False): + yaml = YAML(typ='safe', pure=True) + with open(error_filename, 'rb') as fp0: + code = fp0.read() + try: + import yaml + + exec(code) + except yaml.YAMLError as exc: + if verbose: + print('%s:' % exc.__class__.__name__, exc) + else: + raise AssertionError('expected an exception') + + +test_dumper_error.unittest = ['.dumper-error'] + +if __name__ == '__main__': + import test_appliance + + test_appliance.run(globals()) diff --git a/_test/lib/test_input_output.py b/_test/lib/test_input_output.py new file mode 100644 index 0000000..8d0c2cb --- /dev/null +++ b/_test/lib/test_input_output.py @@ -0,0 +1,190 @@ +import ruyaml as yaml + +YAML = yaml.YAML +import codecs +import os +import os.path +import tempfile +from io import BytesIO, StringIO + +# Skipped because we have no idea where the "unicode_filename" +# 
fixture is supposed to come from +import pytest + +import ruyaml as yaml + +pytestmark = pytest.mark.skip + + +def test_unicode_input(unicode_filename, verbose=False): + yaml = YAML(typ='safe', pure=True) + with open(unicode_filename, 'rb') as fp: + data = fp.read().decode('utf-8') + value = ' '.join(data.split()) + output = yaml.load(data) + assert output == value, (output, value) + output = yaml.load(StringIO(data)) + assert output == value, (output, value) + for input in [ + data.encode('utf-8'), + codecs.BOM_UTF8 + data.encode('utf-8'), + codecs.BOM_UTF16_BE + data.encode('utf-16-be'), + codecs.BOM_UTF16_LE + data.encode('utf-16-le'), + ]: + if verbose: + print('INPUT:', repr(input[:10]), '...') + output = yaml.load(input) + assert output == value, (output, value) + output = yaml.load(BytesIO(input)) + assert output == value, (output, value) + + +test_unicode_input.unittest = ['.unicode'] + + +def test_unicode_input_errors(unicode_filename, verbose=False): + yaml = YAML(typ='safe', pure=True) + with open(unicode_filename, 'rb') as fp: + data = fp.read().decode('utf-8') + for input in [ + data.encode('latin1', 'ignore'), + data.encode('utf-16-be'), + data.encode('utf-16-le'), + codecs.BOM_UTF8 + data.encode('utf-16-be'), + codecs.BOM_UTF16_BE + data.encode('utf-16-le'), + codecs.BOM_UTF16_LE + data.encode('utf-8') + b'!', + ]: + try: + yaml.load(input) + except yaml.YAMLError as exc: + if verbose: + print(exc) + else: + raise AssertionError('expected an exception') + try: + yaml.load(BytesIO(input)) + except yaml.YAMLError as exc: + if verbose: + print(exc) + else: + raise AssertionError('expected an exception') + + +test_unicode_input_errors.unittest = ['.unicode'] + + +def test_unicode_output(unicode_filename, verbose=False): + yaml = YAML(typ='safe', pure=True) + with open(unicode_filename, 'rb') as fp: + data = fp.read().decode('utf-8') + value = ' '.join(data.split()) + for allow_unicode in [False, True]: + data1 = yaml.dump(value, 
allow_unicode=allow_unicode) + for encoding in [None, 'utf-8', 'utf-16-be', 'utf-16-le']: + stream = StringIO() + yaml.dump(value, stream, encoding=encoding, allow_unicode=allow_unicode) + data2 = stream.getvalue() + data3 = yaml.dump(value, encoding=encoding, allow_unicode=allow_unicode) + if encoding is not None: + assert isinstance(data3, bytes) + data3 = data3.decode(encoding) + stream = BytesIO() + if encoding is None: + try: + yaml.dump( + value, stream, encoding=encoding, allow_unicode=allow_unicode + ) + except TypeError as exc: + if verbose: + print(exc) + data4 = None + else: + raise AssertionError('expected an exception') + else: + yaml.dump(value, stream, encoding=encoding, allow_unicode=allow_unicode) + data4 = stream.getvalue() + if verbose: + print('BYTES:', data4[:50]) + data4 = data4.decode(encoding) + for copy in [data1, data2, data3, data4]: + if copy is None: + continue + assert isinstance(copy, str) + if allow_unicode: + try: + copy[4:].encode('ascii') + except UnicodeEncodeError as exc: + if verbose: + print(exc) + else: + raise AssertionError('expected an exception') + else: + copy[4:].encode('ascii') + assert isinstance(data1, str), (type(data1), encoding) + assert isinstance(data2, str), (type(data2), encoding) + + +test_unicode_output.unittest = ['.unicode'] + + +def test_file_output(unicode_filename, verbose=False): + yaml = YAML(typ='safe', pure=True) + with open(unicode_filename, 'rb') as fp: + data = fp.read().decode('utf-8') + handle, filename = tempfile.mkstemp() + os.close(handle) + try: + stream = StringIO() + yaml.dump(data, stream, allow_unicode=True) + data1 = stream.getvalue() + stream = BytesIO() + yaml.dump(data, stream, encoding='utf-16-le', allow_unicode=True) + data2 = stream.getvalue().decode('utf-16-le')[1:] + with open(filename, 'w', encoding='utf-16-le') as stream: + yaml.dump(data, stream, allow_unicode=True) + with open(filename, 'r', encoding='utf-16-le') as fp0: + data3 = fp0.read() + with open(filename, 'wb') as 
stream: + yaml.dump(data, stream, encoding='utf-8', allow_unicode=True) + with open(filename, 'r', encoding='utf-8') as fp0: + data4 = fp0.read() + assert data1 == data2, (data1, data2) + assert data1 == data3, (data1, data3) + assert data1 == data4, (data1, data4) + finally: + if os.path.exists(filename): + os.unlink(filename) + + +test_file_output.unittest = ['.unicode'] + + +def test_unicode_transfer(unicode_filename, verbose=False): + yaml = YAML(typ='safe', pure=True) + with open(unicode_filename, 'rb') as fp: + data = fp.read().decode('utf-8') + for encoding in [None, 'utf-8', 'utf-16-be', 'utf-16-le']: + input = data + if encoding is not None: + input = ('\ufeff' + input).encode(encoding) + output1 = yaml.emit(yaml.parse(input), allow_unicode=True) + if encoding is None: + stream = StringIO() + else: + stream = BytesIO() + yaml.emit(yaml.parse(input), stream, allow_unicode=True) + output2 = stream.getvalue() + assert isinstance(output1, str), (type(output1), encoding) + if encoding is None: + assert isinstance(output2, str), (type(output1), encoding) + else: + assert isinstance(output2, bytes), (type(output1), encoding) + output2.decode(encoding) + + +test_unicode_transfer.unittest = ['.unicode'] + +if __name__ == '__main__': + import test_appliance + + test_appliance.run(globals()) diff --git a/_test/lib/test_mark.py b/_test/lib/test_mark.py new file mode 100644 index 0000000..af592a5 --- /dev/null +++ b/_test/lib/test_mark.py @@ -0,0 +1,40 @@ +# Skipped because we have no idea where the "marks_filename" +# fixture is supposed to come from +import pytest + +import ruyaml as yaml + +pytestmark = pytest.mark.skip + + +def test_marks(marks_filename, verbose=False): + with open(marks_filename, 'r') as fp0: + inputs = fp0.read().split('---\n')[1:] + for input in inputs: + index = 0 + line = 0 + column = 0 + while input[index] != '*': + if input[index] == '\n': + line += 1 + column = 0 + else: + column += 1 + index += 1 + mark = yaml.Mark(marks_filename, index, 
line, column, str(input), index) + snippet = mark.get_snippet(indent=2, max_length=79) + if verbose: + print(snippet) + assert isinstance(snippet, str), type(snippet) + assert snippet.count('\n') == 1, snippet.count('\n') + data, pointer = snippet.split('\n') + assert len(data) < 82, len(data) + assert data[len(pointer) - 1] == '*', data[len(pointer) - 1] + + +test_marks.unittest = ['.marks'] + +if __name__ == '__main__': + import test_appliance + + test_appliance.run(globals()) diff --git a/_test/lib/test_reader.py b/_test/lib/test_reader.py new file mode 100644 index 0000000..56ad671 --- /dev/null +++ b/_test/lib/test_reader.py @@ -0,0 +1,49 @@ +import codecs # NOQA +import io + +# Skipped because we have no idea where the "error_filename" +# fixture is supposed to come from +import pytest + +import ruyaml.reader + +pytestmark = pytest.mark.skip + + +def _run_reader(data, verbose): + try: + stream = ruyaml.py.reader.Reader(data) + while stream.peek() != '\0': + stream.forward() + except ruyaml.py.reader.ReaderError as exc: + if verbose: + print(exc) + else: + raise AssertionError('expected an exception') + + +def test_stream_error(error_filename, verbose=False): + with open(error_filename, 'rb') as fp0: + _run_reader(fp0, verbose) + with open(error_filename, 'rb') as fp0: + _run_reader(fp0.read(), verbose) + for encoding in ['utf-8', 'utf-16-le', 'utf-16-be']: + try: + with open(error_filename, 'rb') as fp0: + data = fp0.read().decode(encoding) + break + except UnicodeDecodeError: + pass + else: + return + _run_reader(data, verbose) + with io.open(error_filename, encoding=encoding) as fp: + _run_reader(fp, verbose) + + +test_stream_error.unittest = ['.stream-error'] + +if __name__ == '__main__': + import test_appliance + + test_appliance.run(globals()) diff --git a/_test/lib/test_recursive.py b/_test/lib/test_recursive.py new file mode 100644 index 0000000..e9d9565 --- /dev/null +++ b/_test/lib/test_recursive.py @@ -0,0 +1,63 @@ +# Skipped because we have no idea 
where the "recursive_filename" +# fixture is supposed to come from +import pytest + +import ruyaml + +pytestmark = pytest.mark.skip + + +class AnInstance: + def __init__(self, foo, bar): + self.foo = foo + self.bar = bar + + def __repr__(self): + try: + return '%s(foo=%r, bar=%r)' % (self.__class__.__name__, self.foo, self.bar) + except RuntimeError: + return '%s(foo=..., bar=...)' % self.__class__.__name__ + + +class AnInstanceWithState(AnInstance): + def __getstate__(self): + return {'attributes': [self.foo, self.bar]} + + def __setstate__(self, state): + self.foo, self.bar = state['attributes'] + + +def test_recursive(recursive_filename, verbose=False): + yaml = ruyaml.YAML(typ='safe', pure=True) + context = globals().copy() + with open(recursive_filename, 'rb') as fp0: + exec(fp0.read(), context) + value1 = context['value'] + output1 = None + value2 = None + output2 = None + try: + buf = ruyaml.compat.StringIO() + output1 = yaml.dump(value1, buf) + yaml.load(output1) + value2 = buf.getvalue() + buf = ruyaml.compat.StringIO() + yaml.dump(value2, buf) + output2 = buf.getvalue() + assert output1 == output2, (output1, output2) + finally: + if verbose: + print('VALUE1:', value1) + print('VALUE2:', value2) + print('OUTPUT1:') + print(output1) + print('OUTPUT2:') + print(output2) + + +test_recursive.unittest = ['.recursive'] + +if __name__ == '__main__': + import test_appliance + + test_appliance.run(globals()) diff --git a/_test/lib/test_representer.py b/_test/lib/test_representer.py new file mode 100644 index 0000000..9eb290c --- /dev/null +++ b/_test/lib/test_representer.py @@ -0,0 +1,59 @@ +import ruyaml as yaml + +YAML = yaml.YAML + +import pprint + +# Skipped because we have no idea where the "code_filename" +# fixture is supposed to come from +import pytest +import test_constructor + +pytestmark = pytest.mark.skip + + +def test_representer_types(code_filename, verbose=False): + yaml = YAML(typ='safe', pure=True) + test_constructor._make_objects() + for 
allow_unicode in [False, True]: + for encoding in ['utf-8', 'utf-16-be', 'utf-16-le']: + with open(code_filename, 'rb') as fp0: + native1 = test_constructor._load_code(fp0.read()) + native2 = None + try: + output = yaml.dump( + native1, + Dumper=test_constructor.MyDumper, + allow_unicode=allow_unicode, + encoding=encoding, + ) + native2 = yaml.load(output, Loader=test_constructor.MyLoader) + try: + if native1 == native2: + continue + except TypeError: + pass + value1 = test_constructor._serialize_value(native1) + value2 = test_constructor._serialize_value(native2) + if verbose: + print('SERIALIZED NATIVE1:') + print(value1) + print('SERIALIZED NATIVE2:') + print(value2) + assert value1 == value2, (native1, native2) + finally: + if verbose: + print('NATIVE1:') + pprint.pprint(native1) + print('NATIVE2:') + pprint.pprint(native2) + print('OUTPUT:') + print(output) + + +test_representer_types.unittest = ['.code'] + +if __name__ == '__main__': + import test_appliance + + test_appliance.run(globals()) diff --git a/_test/lib/test_resolver.py b/_test/lib/test_resolver.py new file mode 100644 index 0000000..41c0364 --- /dev/null +++ b/_test/lib/test_resolver.py @@ -0,0 +1,117 @@ +import pprint + +# Skipped because we have no idea where all those fixtures originate +import pytest + +import ruyaml as yaml + +pytestmark = pytest.mark.skip + + +def test_implicit_resolver(data_filename, detect_filename, verbose=False): + correct_tag = None + node = None + try: + with open(detect_filename, 'r') as fp0: + correct_tag = fp0.read().strip() + with open(data_filename, 'rb') as fp0: + node = yaml.compose(fp0) + assert isinstance(node, yaml.SequenceNode), node + for scalar in node.value: + assert isinstance(scalar, yaml.ScalarNode), scalar + assert scalar.tag == correct_tag, (scalar.tag, correct_tag) + finally: + if verbose: + print('CORRECT TAG:', correct_tag) + if hasattr(node, 'value'): + print('CHILDREN:') + pprint.pprint(node.value) + + +test_implicit_resolver.unittest = ['.data', 
'.detect'] + + +def _make_path_loader_and_dumper(): + global MyLoader, MyDumper + + class MyLoader(yaml.Loader): + pass + + class MyDumper(yaml.Dumper): + pass + + yaml.add_path_resolver('!root', [], Loader=MyLoader, Dumper=MyDumper) + yaml.add_path_resolver('!root/scalar', [], str, Loader=MyLoader, Dumper=MyDumper) + yaml.add_path_resolver( + '!root/key11/key12/*', ['key11', 'key12'], Loader=MyLoader, Dumper=MyDumper + ) + yaml.add_path_resolver( + '!root/key21/1/*', ['key21', 1], Loader=MyLoader, Dumper=MyDumper + ) + yaml.add_path_resolver( + '!root/key31/*/*/key14/map', + ['key31', None, None, 'key14'], + dict, + Loader=MyLoader, + Dumper=MyDumper, + ) + + return MyLoader, MyDumper + + +def _convert_node(node): + if isinstance(node, yaml.ScalarNode): + return (node.tag, node.value) + elif isinstance(node, yaml.SequenceNode): + value = [] + for item in node.value: + value.append(_convert_node(item)) + return (node.tag, value) + elif isinstance(node, yaml.MappingNode): + value = [] + for key, item in node.value: + value.append((_convert_node(key), _convert_node(item))) + return (node.tag, value) + + +def test_path_resolver_loader(data_filename, path_filename, verbose=False): + _make_path_loader_and_dumper() + with open(data_filename, 'rb') as fp0: + nodes1 = list(yaml.compose_all(fp0.read(), Loader=MyLoader)) + with open(path_filename, 'rb') as fp0: + nodes2 = list(yaml.compose_all(fp0.read())) + try: + for node1, node2 in zip(nodes1, nodes2): + data1 = _convert_node(node1) + data2 = _convert_node(node2) + assert data1 == data2, (data1, data2) + finally: + if verbose: + print(yaml.serialize_all(nodes1)) + + +test_path_resolver_loader.unittest = ['.data', '.path'] + + +def test_path_resolver_dumper(data_filename, path_filename, verbose=False): + _make_path_loader_and_dumper() + for filename in [data_filename, path_filename]: + with open(filename, 'rb') as fp0: + output = yaml.serialize_all(yaml.compose_all(fp0), Dumper=MyDumper) + if verbose: + print(output) + 
nodes1 = yaml.compose_all(output) + with open(data_filename, 'rb') as fp0: + nodes2 = yaml.compose_all(fp0) + for node1, node2 in zip(nodes1, nodes2): + data1 = _convert_node(node1) + data2 = _convert_node(node2) + assert data1 == data2, (data1, data2) + + +test_path_resolver_dumper.unittest = ['.data', '.path'] + +if __name__ == '__main__': + import test_appliance + + test_appliance.run(globals()) diff --git a/_test/lib/test_structure.py b/_test/lib/test_structure.py new file mode 100644 index 0000000..0a3bc39 --- /dev/null +++ b/_test/lib/test_structure.py @@ -0,0 +1,234 @@ +import pprint + +import canonical # NOQA + +# Skipped because we have no idea where this "canonical" module +# comes from, nor where all those fixtures originate +import pytest + +import ruyaml + +pytestmark = pytest.mark.skip +# import canonical # NOQA + + +def _convert_structure(loader): + if loader.check_event(ruyaml.ScalarEvent): + event = loader.get_event() + if event.tag or event.anchor or event.value: + return True + else: + return None + elif loader.check_event(ruyaml.SequenceStartEvent): + loader.get_event() + sequence = [] + while not loader.check_event(ruyaml.SequenceEndEvent): + sequence.append(_convert_structure(loader)) + loader.get_event() + return sequence + elif loader.check_event(ruyaml.MappingStartEvent): + loader.get_event() + mapping = [] + while not loader.check_event(ruyaml.MappingEndEvent): + key = _convert_structure(loader) + value = _convert_structure(loader) + mapping.append((key, value)) + loader.get_event() + return mapping + elif loader.check_event(ruyaml.AliasEvent): + loader.get_event() + return '*' + else: + loader.get_event() + return '?' 
+ + +def test_structure(data_filename, structure_filename, verbose=False): + nodes1 = [] + with open(structure_filename, 'r') as fp: + nodes2 = eval(fp.read()) + try: + with open(data_filename, 'rb') as fp: + loader = ruyaml.Loader(fp) + while loader.check_event(): + if loader.check_event( + ruyaml.StreamStartEvent, + ruyaml.StreamEndEvent, + ruyaml.DocumentStartEvent, + ruyaml.DocumentEndEvent, + ): + loader.get_event() + continue + nodes1.append(_convert_structure(loader)) + if len(nodes1) == 1: + nodes1 = nodes1[0] + assert nodes1 == nodes2, (nodes1, nodes2) + finally: + if verbose: + print('NODES1:') + pprint.pprint(nodes1) + print('NODES2:') + pprint.pprint(nodes2) + + +test_structure.unittest = ['.data', '.structure'] + + +def _compare_events(events1, events2, full=False): + assert len(events1) == len(events2), (len(events1), len(events2)) + for event1, event2 in zip(events1, events2): + assert event1.__class__ == event2.__class__, (event1, event2) + if isinstance(event1, ruyaml.AliasEvent) and full: + assert event1.anchor == event2.anchor, (event1, event2) + if isinstance(event1, (ruyaml.ScalarEvent, ruyaml.CollectionStartEvent)): + if ( + event1.tag not in [None, '!'] and event2.tag not in [None, '!'] + ) or full: + assert event1.tag == event2.tag, (event1, event2) + if isinstance(event1, ruyaml.ScalarEvent): + assert event1.value == event2.value, (event1, event2) + + +def test_parser(data_filename, canonical_filename, verbose=False): + events1 = None + events2 = None + try: + with open(data_filename, 'rb') as fp0: + events1 = list(ruyaml.parse(fp0)) + with open(canonical_filename, 'rb') as fp0: + events2 = list(ruyaml.canonical_parse(fp0)) + _compare_events(events1, events2) + finally: + if verbose: + print('EVENTS1:') + pprint.pprint(events1) + print('EVENTS2:') + pprint.pprint(events2) + + +test_parser.unittest = ['.data', '.canonical'] + + +def test_parser_on_canonical(canonical_filename, verbose=False): + events1 = None + events2 = None + try: + with 
open(canonical_filename, 'rb') as fp0: + events1 = list(ruyaml.parse(fp0)) + with open(canonical_filename, 'rb') as fp0: + events2 = list(ruyaml.canonical_parse(fp0)) + _compare_events(events1, events2, full=True) + finally: + if verbose: + print('EVENTS1:') + pprint.pprint(events1) + print('EVENTS2:') + pprint.pprint(events2) + + +test_parser_on_canonical.unittest = ['.canonical'] + + +def _compare_nodes(node1, node2): + assert node1.__class__ == node2.__class__, (node1, node2) + assert node1.tag == node2.tag, (node1, node2) + if isinstance(node1, ruyaml.ScalarNode): + assert node1.value == node2.value, (node1, node2) + else: + assert len(node1.value) == len(node2.value), (node1, node2) + for item1, item2 in zip(node1.value, node2.value): + if not isinstance(item1, tuple): + item1 = (item1,) + item2 = (item2,) + for subnode1, subnode2 in zip(item1, item2): + _compare_nodes(subnode1, subnode2) + + +def test_composer(data_filename, canonical_filename, verbose=False): + nodes1 = None + nodes2 = None + try: + with open(data_filename, 'rb') as fp0: + nodes1 = list(ruyaml.compose_all(fp0)) + with open(canonical_filename, 'rb') as fp0: + nodes2 = list(ruyaml.canonical_compose_all(fp0)) + assert len(nodes1) == len(nodes2), (len(nodes1), len(nodes2)) + for node1, node2 in zip(nodes1, nodes2): + _compare_nodes(node1, node2) + finally: + if verbose: + print('NODES1:') + pprint.pprint(nodes1) + print('NODES2:') + pprint.pprint(nodes2) + + +test_composer.unittest = ['.data', '.canonical'] + + +def _make_loader(): + global MyLoader + + class MyLoader(ruyaml.Loader): + def construct_sequence(self, node): + return tuple(ruyaml.Loader.construct_sequence(self, node)) + + def construct_mapping(self, node): + pairs = self.construct_pairs(node) + pairs.sort(key=(lambda i: str(i))) + return pairs + + def construct_undefined(self, node): + return self.construct_scalar(node) + + MyLoader.add_constructor('tag:yaml.org,2002:map', MyLoader.construct_mapping) + MyLoader.add_constructor(None, 
MyLoader.construct_undefined) + + +def _make_canonical_loader(): + global MyCanonicalLoader + + class MyCanonicalLoader(ruyaml.CanonicalLoader): + def construct_sequence(self, node): + return tuple(ruyaml.CanonicalLoader.construct_sequence(self, node)) + + def construct_mapping(self, node): + pairs = self.construct_pairs(node) + pairs.sort(key=(lambda i: str(i))) + return pairs + + def construct_undefined(self, node): + return self.construct_scalar(node) + + MyCanonicalLoader.add_constructor( + 'tag:yaml.org,2002:map', MyCanonicalLoader.construct_mapping + ) + MyCanonicalLoader.add_constructor(None, MyCanonicalLoader.construct_undefined) + + +def test_constructor(data_filename, canonical_filename, verbose=False): + _make_loader() + _make_canonical_loader() + native1 = None + native2 = None + yaml = ruyaml.YAML(typ='safe') + try: + with open(data_filename, 'rb') as fp0: + native1 = list(yaml.load(fp0, Loader=MyLoader)) + with open(canonical_filename, 'rb') as fp0: + native2 = list(yaml.load(fp0, Loader=MyCanonicalLoader)) + assert native1 == native2, (native1, native2) + finally: + if verbose: + print('NATIVE1:') + pprint.pprint(native1) + print('NATIVE2:') + pprint.pprint(native2) + + +test_constructor.unittest = ['.data', '.canonical'] + +if __name__ == '__main__': + import test_appliance + + test_appliance.run(globals()) diff --git a/_test/lib/test_tokens.py b/_test/lib/test_tokens.py new file mode 100644 index 0000000..8c213fd --- /dev/null +++ b/_test/lib/test_tokens.py @@ -0,0 +1,93 @@ +# Skipped because we have no idea where all those fixtures originate +import pytest + +pytestmark = pytest.mark.skip + +import pprint + +import ruyaml + +# Tokens mnemonic: +# directive: % +# document_start: --- +# document_end: ... +# alias: * +# anchor: & +# tag: ! +# scalar _ +# block_sequence_start: [[ +# block_mapping_start: {{ +# block_end: ]} +# flow_sequence_start: [ +# flow_sequence_end: ] +# flow_mapping_start: { +# flow_mapping_end: } +# entry: , +# key: ? 
+# value: : + +_replaces = { + ruyaml.DirectiveToken: '%', + ruyaml.DocumentStartToken: '---', + ruyaml.DocumentEndToken: '...', + ruyaml.AliasToken: '*', + ruyaml.AnchorToken: '&', + ruyaml.TagToken: '!', + ruyaml.ScalarToken: '_', + ruyaml.BlockSequenceStartToken: '[[', + ruyaml.BlockMappingStartToken: '{{', + ruyaml.BlockEndToken: ']}', + ruyaml.FlowSequenceStartToken: '[', + ruyaml.FlowSequenceEndToken: ']', + ruyaml.FlowMappingStartToken: '{', + ruyaml.FlowMappingEndToken: '}', + ruyaml.BlockEntryToken: ',', + ruyaml.FlowEntryToken: ',', + ruyaml.KeyToken: '?', + ruyaml.ValueToken: ':', +} + + +def test_tokens(data_filename, tokens_filename, verbose=False): + tokens1 = [] + with open(tokens_filename, 'r') as fp: + tokens2 = fp.read().split() + try: + yaml = ruyaml.YAML(typ='unsafe', pure=True) + with open(data_filename, 'rb') as fp1: + for token in yaml.scan(fp1): + if not isinstance( + token, (ruyaml.StreamStartToken, ruyaml.StreamEndToken) + ): + tokens1.append(_replaces[token.__class__]) + finally: + if verbose: + print('TOKENS1:', ' '.join(tokens1)) + print('TOKENS2:', ' '.join(tokens2)) + assert len(tokens1) == len(tokens2), (tokens1, tokens2) + for token1, token2 in zip(tokens1, tokens2): + assert token1 == token2, (token1, token2) + + +test_tokens.unittest = ['.data', '.tokens'] + + +def test_scanner(data_filename, canonical_filename, verbose=False): + for filename in [data_filename, canonical_filename]: + tokens = [] + try: + yaml = ruyaml.YAML(typ='unsafe', pure=False) + with open(filename, 'rb') as fp: + for token in yaml.scan(fp): + tokens.append(token.__class__.__name__) + finally: + if verbose: + pprint.pprint(tokens) + + +test_scanner.unittest = ['.data', '.canonical'] + +if __name__ == '__main__': + import test_appliance + + test_appliance.run(globals()) diff --git a/_test/lib/test_yaml.py b/_test/lib/test_yaml.py new file mode 100644 index 0000000..8df5d1f --- /dev/null +++ b/_test/lib/test_yaml.py @@ -0,0 +1,21 @@ +# coding: utf-8 + +from 
test_canonical import * # NOQA +from test_constructor import * # NOQA +from test_emitter import * # NOQA +from test_errors import * # NOQA +from test_input_output import * # NOQA +from test_mark import * # NOQA +from test_reader import * # NOQA +from test_recursive import * # NOQA +from test_representer import * # NOQA +from test_resolver import * # NOQA +from test_structure import * # NOQA +from test_tokens import * # NOQA + +if __name__ == '__main__': + import sys + + import test_appliance + + sys.exit(test_appliance.run(globals())) diff --git a/_test/lib/test_yaml_ext.py b/_test/lib/test_yaml_ext.py new file mode 100644 index 0000000..15af5a7 --- /dev/null +++ b/_test/lib/test_yaml_ext.py @@ -0,0 +1,418 @@ +# coding: utf-8 + +import pprint +import types + +import ruyaml + +try: + import _ruyaml +except ImportError: + import pytest + + pytestmark = pytest.mark.skip + + class DummyLoader(type): + pass + + ruyaml.CLoader = DummyLoader + ruyaml.CDumper = DummyLoader + +ruyaml.PyBaseLoader = ruyaml.BaseLoader +ruyaml.PySafeLoader = ruyaml.SafeLoader +ruyaml.PyLoader = ruyaml.Loader +ruyaml.PyBaseDumper = ruyaml.BaseDumper +ruyaml.PySafeDumper = ruyaml.SafeDumper +ruyaml.PyDumper = ruyaml.Dumper + +old_scan = ruyaml.scan + + +def new_scan(stream, Loader=ruyaml.CLoader): + return old_scan(stream, Loader) + + +old_parse = ruyaml.parse + + +def new_parse(stream, Loader=ruyaml.CLoader): + return old_parse(stream, Loader) + + +old_compose = ruyaml.compose + + +def new_compose(stream, Loader=ruyaml.CLoader): + return old_compose(stream, Loader) + + +old_compose_all = ruyaml.compose_all + + +def new_compose_all(stream, Loader=ruyaml.CLoader): + return old_compose_all(stream, Loader) + + +old_load = ruyaml.load + + +def new_load(stream, Loader=ruyaml.CLoader): + return old_load(stream, Loader) + + +old_load_all = ruyaml.load_all + + +def new_load_all(stream, Loader=ruyaml.CLoader): + return old_load_all(stream, Loader) + + +old_safe_load = ruyaml.safe_load + + +def 
new_safe_load(stream): + return old_load(stream, ruyaml.CSafeLoader) + + +old_safe_load_all = ruyaml.safe_load_all + + +def new_safe_load_all(stream): + return old_load_all(stream, ruyaml.CSafeLoader) + + +old_emit = ruyaml.emit + + +def new_emit(events, stream=None, Dumper=ruyaml.CDumper, **kwds): + return old_emit(events, stream, Dumper, **kwds) + + +old_serialize = ruyaml.serialize + + +def new_serialize(node, stream, Dumper=ruyaml.CDumper, **kwds): + return old_serialize(node, stream, Dumper, **kwds) + + +old_serialize_all = ruyaml.serialize_all + + +def new_serialize_all(nodes, stream=None, Dumper=ruyaml.CDumper, **kwds): + return old_serialize_all(nodes, stream, Dumper, **kwds) + + +old_dump = ruyaml.dump + + +def new_dump(data, stream=None, Dumper=ruyaml.CDumper, **kwds): + return old_dump(data, stream, Dumper, **kwds) + + +old_dump_all = ruyaml.dump_all + + +def new_dump_all(documents, stream=None, Dumper=ruyaml.CDumper, **kwds): + return old_dump_all(documents, stream, Dumper, **kwds) + + +old_safe_dump = ruyaml.safe_dump + + +def new_safe_dump(data, stream=None, **kwds): + return old_dump(data, stream, ruyaml.CSafeDumper, **kwds) + + +old_safe_dump_all = ruyaml.safe_dump_all + + +def new_safe_dump_all(documents, stream=None, **kwds): + return old_dump_all(documents, stream, ruyaml.CSafeDumper, **kwds) + + +def _set_up(): + ruyaml.BaseLoader = ruyaml.CBaseLoader + ruyaml.SafeLoader = ruyaml.CSafeLoader + ruyaml.Loader = ruyaml.CLoader + ruyaml.BaseDumper = ruyaml.CBaseDumper + ruyaml.SafeDumper = ruyaml.CSafeDumper + ruyaml.Dumper = ruyaml.CDumper + ruyaml.scan = new_scan + ruyaml.parse = new_parse + ruyaml.compose = new_compose + ruyaml.compose_all = new_compose_all + ruyaml.load = new_load + ruyaml.load_all = new_load_all + ruyaml.safe_load = new_safe_load + ruyaml.safe_load_all = new_safe_load_all + ruyaml.emit = new_emit + ruyaml.serialize = new_serialize + ruyaml.serialize_all = new_serialize_all + ruyaml.dump = new_dump + ruyaml.dump_all = 
new_dump_all + ruyaml.safe_dump = new_safe_dump + ruyaml.safe_dump_all = new_safe_dump_all + + +def _tear_down(): + ruyaml.BaseLoader = ruyaml.PyBaseLoader + ruyaml.SafeLoader = ruyaml.PySafeLoader + ruyaml.Loader = ruyaml.PyLoader + ruyaml.BaseDumper = ruyaml.PyBaseDumper + ruyaml.SafeDumper = ruyaml.PySafeDumper + ruyaml.Dumper = ruyaml.PyDumper + ruyaml.scan = old_scan + ruyaml.parse = old_parse + ruyaml.compose = old_compose + ruyaml.compose_all = old_compose_all + ruyaml.load = old_load + ruyaml.load_all = old_load_all + ruyaml.safe_load = old_safe_load + ruyaml.safe_load_all = old_safe_load_all + ruyaml.emit = old_emit + ruyaml.serialize = old_serialize + ruyaml.serialize_all = old_serialize_all + ruyaml.dump = old_dump + ruyaml.dump_all = old_dump_all + ruyaml.safe_dump = old_safe_dump + ruyaml.safe_dump_all = old_safe_dump_all + + +def test_c_version(verbose=False): + if verbose: + print(_ruyaml.get_version()) + print(_ruyaml.get_version_string()) + assert ('%s.%s.%s' % _ruyaml.get_version()) == _ruyaml.get_version_string(), ( + _ruyaml.get_version(), + _ruyaml.get_version_string(), + ) + + +def _compare_scanners(py_data, c_data, verbose): + yaml = ruyaml.YAML(typ='unsafe', pure=True) + py_tokens = list(yaml.scan(py_data, Loader=ruyaml.PyLoader)) + c_tokens = [] + try: + yaml = ruyaml.YAML(typ='unsafe', pure=False) + for token in yaml.scan(c_data, Loader=ruyaml.CLoader): + c_tokens.append(token) + assert len(py_tokens) == len(c_tokens), (len(py_tokens), len(c_tokens)) + for py_token, c_token in zip(py_tokens, c_tokens): + assert py_token.__class__ == c_token.__class__, (py_token, c_token) + if hasattr(py_token, 'value'): + assert py_token.value == c_token.value, (py_token, c_token) + if isinstance(py_token, ruyaml.StreamEndToken): + continue + py_start = ( + py_token.start_mark.index, + py_token.start_mark.line, + py_token.start_mark.column, + ) + py_end = ( + py_token.end_mark.index, + py_token.end_mark.line, + py_token.end_mark.column, + ) + c_start = ( + 
c_token.start_mark.index, + c_token.start_mark.line, + c_token.start_mark.column, + ) + c_end = ( + c_token.end_mark.index, + c_token.end_mark.line, + c_token.end_mark.column, + ) + assert py_start == c_start, (py_start, c_start) + assert py_end == c_end, (py_end, c_end) + finally: + if verbose: + print('PY_TOKENS:') + pprint.pprint(py_tokens) + print('C_TOKENS:') + pprint.pprint(c_tokens) + + +def test_c_scanner(data_filename, canonical_filename, verbose=False): + with open(data_filename, 'rb') as fp0: + with open(data_filename, 'rb') as fp1: + _compare_scanners(fp0, fp1, verbose) + with open(data_filename, 'rb') as fp0: + with open(data_filename, 'rb') as fp1: + _compare_scanners(fp0.read(), fp1.read(), verbose) + with open(canonical_filename, 'rb') as fp0: + with open(canonical_filename, 'rb') as fp1: + _compare_scanners(fp0, fp1, verbose) + with open(canonical_filename, 'rb') as fp0: + with open(canonical_filename, 'rb') as fp1: + _compare_scanners(fp0.read(), fp1.read(), verbose) + + +test_c_scanner.unittest = ['.data', '.canonical'] +test_c_scanner.skip = ['.skip-ext'] + + +def _compare_parsers(py_data, c_data, verbose): + py_events = list(ruyaml.parse(py_data, Loader=ruyaml.PyLoader)) + c_events = [] + try: + for event in ruyaml.parse(c_data, Loader=ruyaml.CLoader): + c_events.append(event) + assert len(py_events) == len(c_events), (len(py_events), len(c_events)) + for py_event, c_event in zip(py_events, c_events): + for attribute in [ + '__class__', + 'anchor', + 'tag', + 'implicit', + 'value', + 'explicit', + 'version', + 'tags', + ]: + py_value = getattr(py_event, attribute, None) + c_value = getattr(c_event, attribute, None) + assert py_value == c_value, (py_event, c_event, attribute) + finally: + if verbose: + print('PY_EVENTS:') + pprint.pprint(py_events) + print('C_EVENTS:') + pprint.pprint(c_events) + + +def test_c_parser(data_filename, canonical_filename, verbose=False): + with open(data_filename, 'rb') as fp0: + with open(data_filename, 'rb') as 
fp1: + _compare_parsers(fp0, fp1, verbose) + with open(data_filename, 'rb') as fp0: + with open(data_filename, 'rb') as fp1: + _compare_parsers(fp0.read(), fp1.read(), verbose) + with open(canonical_filename, 'rb') as fp0: + with open(canonical_filename, 'rb') as fp1: + _compare_parsers(fp0, fp1, verbose) + with open(canonical_filename, 'rb') as fp0: + with open(canonical_filename, 'rb') as fp1: + _compare_parsers(fp0.read(), fp1.read(), verbose) + + +test_c_parser.unittest = ['.data', '.canonical'] +test_c_parser.skip = ['.skip-ext'] + + +def _compare_emitters(data, verbose): + events = list(ruyaml.parse(data, Loader=ruyaml.PyLoader)) + c_data = ruyaml.emit(events, Dumper=ruyaml.CDumper) + if verbose: + print(c_data) + py_events = list(ruyaml.parse(c_data, Loader=ruyaml.PyLoader)) + c_events = list(ruyaml.parse(c_data, Loader=ruyaml.CLoader)) + try: + assert len(events) == len(py_events), (len(events), len(py_events)) + assert len(events) == len(c_events), (len(events), len(c_events)) + for event, py_event, c_event in zip(events, py_events, c_events): + for attribute in [ + '__class__', + 'anchor', + 'tag', + 'implicit', + 'value', + 'explicit', + 'version', + 'tags', + ]: + value = getattr(event, attribute, None) + py_value = getattr(py_event, attribute, None) + c_value = getattr(c_event, attribute, None) + if ( + attribute == 'tag' + and value in [None, '!'] + and py_value in [None, '!'] + and c_value in [None, '!'] + ): + continue + if attribute == 'explicit' and (py_value or c_value): + continue + assert value == py_value, (event, py_event, attribute) + assert value == c_value, (event, c_event, attribute) + finally: + if verbose: + print('EVENTS:') + pprint.pprint(events) + print('PY_EVENTS:') + pprint.pprint(py_events) + print('C_EVENTS:') + pprint.pprint(c_events) + + +def test_c_emitter(data_filename, canonical_filename, verbose=False): + with open(data_filename, 'rb') as fp0: + _compare_emitters(fp0.read(), verbose) + with open(canonical_filename, 'rb') as 
fp0: + _compare_emitters(fp0.read(), verbose) + + +test_c_emitter.unittest = ['.data', '.canonical'] +test_c_emitter.skip = ['.skip-ext'] + + +def wrap_ext_function(function): + def wrapper(*args, **kwds): + _set_up() + try: + function(*args, **kwds) + finally: + _tear_down() + + wrapper.__name__ = '%s_ext' % function.__name__ + wrapper.unittest = function.unittest + wrapper.skip = getattr(function, 'skip', []) + ['.skip-ext'] + return wrapper + + +def wrap_ext(collections): + functions = [] + if not isinstance(collections, list): + collections = [collections] + for collection in collections: + if not isinstance(collection, dict): + collection = vars(collection) + for key in sorted(collection): + value = collection[key] + if isinstance(value, types.FunctionType) and hasattr(value, 'unittest'): + functions.append(wrap_ext_function(value)) + for function in functions: + assert function.__name__ not in globals() + globals()[function.__name__] = function + + +import test_constructor # NOQA +import test_emitter # NOQA +import test_errors # NOQA +import test_input_output # NOQA +import test_recursive # NOQA +import test_representer # NOQA +import test_resolver # NOQA +import test_structure # NOQA +import test_tokens # NOQA + +wrap_ext( + [ + test_tokens, + test_structure, + test_errors, + test_resolver, + test_constructor, + test_emitter, + test_representer, + test_recursive, + test_input_output, + ] +) + +if __name__ == '__main__': + import sys + + import test_appliance + + sys.exit(test_appliance.run(globals())) diff --git a/_test/roundtrip.py b/_test/roundtrip.py new file mode 100644 index 0000000..9313f42 --- /dev/null +++ b/_test/roundtrip.py @@ -0,0 +1,346 @@ +# coding: utf-8 + +""" +helper routines for testing round trip of commented YAML data +""" +import io +import sys +import textwrap +from pathlib import Path + +import ruyaml + +unset = object() + + +def dedent(data): + try: + position_of_first_newline = data.index('\n') + for idx in 
range(position_of_first_newline): + if not data[idx].isspace(): + raise ValueError + except ValueError: + pass + else: + data = data[position_of_first_newline + 1 :] + return textwrap.dedent(data) + + +def round_trip_load(inp, preserve_quotes=None, version=None): + import ruyaml # NOQA + + dinp = dedent(inp) + yaml = ruyaml.YAML() + yaml.preserve_quotes = preserve_quotes + yaml.version = version + return yaml.load(dinp) + + +def round_trip_load_all(inp, preserve_quotes=None, version=None): + import ruyaml # NOQA + + dinp = dedent(inp) + yaml = ruyaml.YAML() + yaml.preserve_quotes = preserve_quotes + yaml.version = version + return yaml.load_all(dinp) + + +def round_trip_dump( + data, + stream=None, # *, + indent=None, + block_seq_indent=None, + default_flow_style=unset, + top_level_colon_align=None, + prefix_colon=None, + explicit_start=None, + explicit_end=None, + version=None, + allow_unicode=True, +): + import ruyaml # NOQA + + yaml = ruyaml.YAML() + yaml.indent(mapping=indent, sequence=indent, offset=block_seq_indent) + if default_flow_style is not unset: + yaml.default_flow_style = default_flow_style + yaml.top_level_colon_align = top_level_colon_align + yaml.prefix_colon = prefix_colon + yaml.explicit_start = explicit_start + yaml.explicit_end = explicit_end + yaml.version = version + yaml.allow_unicode = allow_unicode + if stream is not None: + yaml.dump(data, stream=stream) + return + buf = io.StringIO() + yaml.dump(data, stream=buf) + return buf.getvalue() + + +def round_trip_dump_all( + data, + stream=None, # *, + indent=None, + block_seq_indent=None, + default_flow_style=unset, + top_level_colon_align=None, + prefix_colon=None, + explicit_start=None, + explicit_end=None, + version=None, + allow_unicode=None, +): + yaml = ruyaml.YAML() + yaml.indent(mapping=indent, sequence=indent, offset=block_seq_indent) + if default_flow_style is not unset: + yaml.default_flow_style = default_flow_style + yaml.top_level_colon_align = top_level_colon_align + 
yaml.prefix_colon = prefix_colon + yaml.explicit_start = explicit_start + yaml.explicit_end = explicit_end + yaml.version = version + yaml.allow_unicode = allow_unicode + if stream is not None: + yaml.dump(data, stream=stream) + return + buf = io.StringIO() + yaml.dump_all(data, stream=buf) + return buf.getvalue() + + +def diff(inp, outp, file_name='stdin'): + import difflib + + inl = inp.splitlines(True) # True for keepends + outl = outp.splitlines(True) + diff = difflib.unified_diff(inl, outl, file_name, 'round trip YAML') + for line in diff: + sys.stdout.write(line) + + +def round_trip( + inp, + outp=None, + extra=None, + intermediate=None, + indent=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + preserve_quotes=None, + explicit_start=None, + explicit_end=None, + version=None, + dump_data=None, +): + """ + inp: input string to parse + outp: expected output (equals input if not specified) + """ + if outp is None: + outp = inp + doutp = dedent(outp) + if extra is not None: + doutp += extra + data = round_trip_load(inp, preserve_quotes=preserve_quotes) + if dump_data: + print('data', data) + if intermediate is not None: + if isinstance(intermediate, dict): + for k, v in intermediate.items(): + if data[k] != v: + print('{0!r} <> {1!r}'.format(data[k], v)) + raise ValueError + res = round_trip_dump( + data, + indent=indent, + block_seq_indent=block_seq_indent, + top_level_colon_align=top_level_colon_align, + prefix_colon=prefix_colon, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + ) + if res != doutp: + diff(doutp, res, 'input string') + print('\nroundtrip data:\n', res, sep="") + assert res == doutp + res = round_trip_dump( + data, + indent=indent, + block_seq_indent=block_seq_indent, + top_level_colon_align=top_level_colon_align, + prefix_colon=prefix_colon, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + ) + print('roundtrip second round data:\n', res, 
sep="") + assert res == doutp + return data + + +def na_round_trip( + inp, + outp=None, + extra=None, + intermediate=None, + indent=None, + top_level_colon_align=None, + prefix_colon=None, + preserve_quotes=None, + explicit_start=None, + explicit_end=None, + version=None, + dump_data=None, +): + """ + inp: input string to parse + outp: expected output (equals input if not specified) + """ + inp = dedent(inp) + if outp is None: + outp = inp + if version is not None: + version = version + doutp = dedent(outp) + if extra is not None: + doutp += extra + yaml = YAML() + yaml.preserve_quotes = preserve_quotes + yaml.scalar_after_indicator = False # newline after every directives end + data = yaml.load(inp) + if dump_data: + print('data', data) + if intermediate is not None: + if isinstance(intermediate, dict): + for k, v in intermediate.items(): + if data[k] != v: + print('{0!r} <> {1!r}'.format(data[k], v)) + raise ValueError + yaml.indent = indent + yaml.top_level_colon_align = top_level_colon_align + yaml.prefix_colon = prefix_colon + yaml.explicit_start = explicit_start + yaml.explicit_end = explicit_end + res = yaml.dump(data, compare=doutp) + return res + + +def YAML(**kw): + import ruyaml # NOQA + + class MyYAML(ruyaml.YAML): + """auto dedent string parameters on load""" + + def load(self, stream): + if isinstance(stream, str): + if stream and stream[0] == '\n': + stream = stream[1:] + stream = textwrap.dedent(stream) + return ruyaml.YAML.load(self, stream) + + def load_all(self, stream): + if isinstance(stream, str): + if stream and stream[0] == '\n': + stream = stream[1:] + stream = textwrap.dedent(stream) + for d in ruyaml.YAML.load_all(self, stream): + yield d + + def dump(self, data, **kw): + from io import BytesIO, StringIO # NOQA + + assert ('stream' in kw) ^ ('compare' in kw) + if 'stream' in kw: + return ruyaml.YAML.dump(data, **kw) + lkw = kw.copy() + expected = textwrap.dedent(lkw.pop('compare')) + unordered_lines = lkw.pop('unordered_lines', False) + 
if expected and expected[0] == '\n': + expected = expected[1:] + lkw['stream'] = st = StringIO() + ruyaml.YAML.dump(self, data, **lkw) + res = st.getvalue() + print(res) + if unordered_lines: + res = sorted(res.splitlines()) + expected = sorted(expected.splitlines()) + assert res == expected + + def round_trip(self, stream, **kw): + from io import BytesIO, StringIO # NOQA + + assert isinstance(stream, str) + lkw = kw.copy() + if stream and stream[0] == '\n': + stream = stream[1:] + stream = textwrap.dedent(stream) + data = ruyaml.YAML.load(self, stream) + outp = lkw.pop('outp', stream) + lkw['stream'] = st = StringIO() + ruyaml.YAML.dump(self, data, **lkw) + res = st.getvalue() + if res != outp: + diff(outp, res, 'input string') + assert res == outp + + def round_trip_all(self, stream, **kw): + from io import BytesIO, StringIO # NOQA + + assert isinstance(stream, str) + lkw = kw.copy() + if stream and stream[0] == '\n': + stream = stream[1:] + stream = textwrap.dedent(stream) + data = list(ruyaml.YAML.load_all(self, stream)) + outp = lkw.pop('outp', stream) + lkw['stream'] = st = StringIO() + ruyaml.YAML.dump_all(self, data, **lkw) + res = st.getvalue() + if res != outp: + diff(outp, res, 'input string') + assert res == outp + + return MyYAML(**kw) + + +def save_and_run(program, base_dir=None, output=None, file_name=None, optimized=False): + """ + safe and run a python program, thereby circumventing any restrictions on module level + imports + """ + from subprocess import STDOUT, CalledProcessError, check_output + + if not hasattr(base_dir, 'hash'): + base_dir = Path(str(base_dir)) + if file_name is None: + file_name = 'safe_and_run_tmp.py' + file_name = base_dir / file_name + file_name.write_text(dedent(program)) + + try: + cmd = [sys.executable, '-Wd'] + if optimized: + cmd.append('-O') + cmd.append(str(file_name)) + print('running:', *cmd) + # 3.5 needs strings + res = check_output( + cmd, stderr=STDOUT, universal_newlines=True, cwd=str(base_dir) + ) + if output 
is not None: + if '__pypy__' in sys.builtin_module_names: + res = res.splitlines(True) + res = [line for line in res if 'no version info' not in line] + res = ''.join(res) + print('result: ', res, end='') + print('expected:', output, end='') + assert res == output + except CalledProcessError as exception: + print("##### Running '{} {}' FAILED #####".format(sys.executable, file_name)) + print(exception.output) + return exception.returncode + return 0 diff --git a/_test/test_a_dedent.py b/_test/test_a_dedent.py new file mode 100644 index 0000000..23729f0 --- /dev/null +++ b/_test/test_a_dedent.py @@ -0,0 +1,57 @@ +# coding: utf-8 + +from .roundtrip import dedent + + +class TestDedent: + def test_start_newline(self): + # fmt: off + x = dedent(""" + 123 + 456 + """) + # fmt: on + assert x == '123\n 456\n' + + def test_start_space_newline(self): + # special construct to prevent stripping of following whitespace + # fmt: off + x = dedent(" " """ + 123 + """) + # fmt: on + assert x == '123\n' + + def test_start_no_newline(self): + # special construct to prevent stripping of following whitespac + x = dedent( + """\ + 123 + 456 + """ + ) + assert x == '123\n 456\n' + + def test_preserve_no_newline_at_end(self): + x = dedent( + """ + 123""" + ) + assert x == '123' + + def test_preserve_no_newline_at_all(self): + x = dedent( + """\ + 123""" + ) + assert x == '123' + + def test_multiple_dedent(self): + x = dedent( + dedent( + """ + 123 + """ + ) + ) + assert x == '123\n' diff --git a/_test/test_add_xxx.py b/_test/test_add_xxx.py new file mode 100644 index 0000000..ee42bf1 --- /dev/null +++ b/_test/test_add_xxx.py @@ -0,0 +1,184 @@ +# coding: utf-8 + +import re + +import pytest # NOQA + +from .roundtrip import dedent, round_trip_dump # NOQA + + +# from PyYAML docs +class Dice(tuple): + def __new__(cls, a, b): + return tuple.__new__(cls, [a, b]) + + def __repr__(self): + return 'Dice(%s,%s)' % self + + +def dice_constructor(loader, node): + value = loader.construct_scalar(node) 
+ a, b = map(int, value.split('d')) + return Dice(a, b) + + +def dice_representer(dumper, data): + return dumper.represent_scalar('!dice', '{}d{}'.format(*data)) + + +def test_dice_constructor(): + import ruyaml # NOQA + + yaml = ruyaml.YAML(typ='unsafe', pure=True) + ruyaml.add_constructor('!dice', dice_constructor) + data = yaml.load('initial hit points: !dice 8d4') + assert str(data) == "{'initial hit points': Dice(8,4)}" + + +def test_dice_constructor_with_loader(): + import ruyaml # NOQA + + yaml = ruyaml.YAML(typ='unsafe', pure=True) + ruyaml.add_constructor('!dice', dice_constructor, Loader=ruyaml.Loader) + data = yaml.load('initial hit points: !dice 8d4') + assert str(data) == "{'initial hit points': Dice(8,4)}" + + +def test_dice_representer(): + import ruyaml # NOQA + + yaml = ruyaml.YAML(typ='unsafe', pure=True) + yaml.default_flow_style = False + ruyaml.add_representer(Dice, dice_representer) + # ruyaml 0.15.8+ no longer forces quotes tagged scalars + buf = ruyaml.compat.StringIO() + yaml.dump(dict(gold=Dice(10, 6)), buf) + assert buf.getvalue() == 'gold: !dice 10d6\n' + + +def test_dice_implicit_resolver(): + import ruyaml # NOQA + + yaml = ruyaml.YAML(typ='unsafe', pure=True) + yaml.default_flow_style = False + pattern = re.compile(r'^\d+d\d+$') + ruyaml.add_implicit_resolver('!dice', pattern) + buf = ruyaml.compat.StringIO() + yaml.dump(dict(treasure=Dice(10, 20)), buf) + assert buf.getvalue() == 'treasure: 10d20\n' + assert yaml.load('damage: 5d10') == dict(damage=Dice(5, 10)) + + +class Obj1(dict): + def __init__(self, suffix): + self._suffix = suffix + self._node = None + + def add_node(self, n): + self._node = n + + def __repr__(self): + return 'Obj1(%s->%s)' % (self._suffix, self.items()) + + def dump(self): + return repr(self._node) + + +class YAMLObj1(object): + yaml_tag = '!obj:' + + @classmethod + def from_yaml(cls, loader, suffix, node): + import ruyaml # NOQA + + obj1 = Obj1(suffix) + if isinstance(node, ruyaml.MappingNode): + 
obj1.add_node(loader.construct_mapping(node)) + else: + raise NotImplementedError + return obj1 + + @classmethod + def to_yaml(cls, dumper, data): + return dumper.represent_scalar(cls.yaml_tag + data._suffix, data.dump()) + + +def test_yaml_obj(): + import ruyaml # NOQA + + yaml = ruyaml.YAML(typ='unsafe', pure=True) + ruyaml.add_representer(Obj1, YAMLObj1.to_yaml) + ruyaml.add_multi_constructor(YAMLObj1.yaml_tag, YAMLObj1.from_yaml) + x = yaml.load('!obj:x.2\na: 1') + print(x) + buf = ruyaml.compat.StringIO() + yaml.dump(x, buf) + assert buf.getvalue() == """!obj:x.2 "{'a': 1}"\n""" + + +def test_yaml_obj_with_loader_and_dumper(): + import ruyaml # NOQA + + yaml = ruyaml.YAML(typ='unsafe', pure=True) + ruyaml.add_representer(Obj1, YAMLObj1.to_yaml, Dumper=ruyaml.Dumper) + ruyaml.add_multi_constructor( + YAMLObj1.yaml_tag, YAMLObj1.from_yaml, Loader=ruyaml.Loader + ) + x = yaml.load('!obj:x.2\na: 1') + # x = ruyaml.load('!obj:x.2\na: 1') + print(x) + buf = ruyaml.compat.StringIO() + yaml.dump(x, buf) + assert buf.getvalue() == """!obj:x.2 "{'a': 1}"\n""" + + +# ToDo use nullege to search add_multi_representer and add_path_resolver +# and add some test code + +# Issue 127 reported by Tommy Wang + + +def test_issue_127(): + import ruyaml # NOQA + + class Ref(ruyaml.YAMLObject): + yaml_constructor = ruyaml.RoundTripConstructor + yaml_representer = ruyaml.RoundTripRepresenter + yaml_tag = '!Ref' + + def __init__(self, logical_id): + self.logical_id = logical_id + + @classmethod + def from_yaml(cls, loader, node): + return cls(loader.construct_scalar(node)) + + @classmethod + def to_yaml(cls, dumper, data): + if isinstance(data.logical_id, ruyaml.scalarstring.ScalarString): + style = data.logical_id.style # ruyaml>0.15.8 + else: + style = None + return dumper.represent_scalar(cls.yaml_tag, data.logical_id, style=style) + + document = dedent( + """\ + AList: + - !Ref One + - !Ref 'Two' + - !Ref + Two and a half + BList: [!Ref Three, !Ref "Four"] + CList: + - Five Six + - 
'Seven Eight' + """ + ) + yaml = ruyaml.YAML() + yaml.preserve_quotes = True + yaml.default_flow_style = None + yaml.indent(sequence=4, offset=2) + data = yaml.load(document) + buf = ruyaml.compat.StringIO() + yaml.dump(data, buf) + assert buf.getvalue() == document.replace('\n Two and', ' Two and') diff --git a/_test/test_anchor.py b/_test/test_anchor.py new file mode 100644 index 0000000..5003428 --- /dev/null +++ b/_test/test_anchor.py @@ -0,0 +1,608 @@ +# coding: utf-8 + +""" +testing of anchors and the aliases referring to them +""" + +import platform +from textwrap import dedent + +import pytest + +from .roundtrip import ( # NOQA + YAML, + dedent, + round_trip, + round_trip_dump, + round_trip_load, +) + + +def load(s): + return round_trip_load(dedent(s)) + + +def compare(d, s): + assert round_trip_dump(d) == dedent(s) + + +class TestAnchorsAliases: + def test_anchor_id_renumber(self): + from ruyaml.serializer import Serializer + + assert Serializer.ANCHOR_TEMPLATE == 'id%03d' + data = load( + """ + a: &id002 + b: 1 + c: 2 + d: *id002 + """ + ) + compare( + data, + """ + a: &id001 + b: 1 + c: 2 + d: *id001 + """, + ) + + def test_template_matcher(self): + """test if id matches the anchor template""" + from ruyaml.serializer import templated_id + + assert templated_id('id001') + assert templated_id('id999') + assert templated_id('id1000') + assert templated_id('id0001') + assert templated_id('id0000') + assert not templated_id('id02') + assert not templated_id('id000') + assert not templated_id('x000') + + # def test_re_matcher(self): + # import re + # assert re.compile('id(?!000)\\d{3,}').match('id001') + # assert not re.compile('id(?!000\\d*)\\d{3,}').match('id000') + # assert re.compile('id(?!000$)\\d{3,}').match('id0001') + + def test_anchor_assigned(self): + from ruyaml.comments import CommentedMap + + data = load( + """ + a: &id002 + b: 1 + c: 2 + d: *id002 + e: &etemplate + b: 1 + c: 2 + f: *etemplate + """ + ) + d = data['d'] + assert isinstance(d, 
CommentedMap) + assert d.yaml_anchor() is None # got dropped as it matches pattern + e = data['e'] + assert isinstance(e, CommentedMap) + assert e.yaml_anchor().value == 'etemplate' + assert e.yaml_anchor().always_dump is False + + def test_anchor_id_retained(self): + data = load( + """ + a: &id002 + b: 1 + c: 2 + d: *id002 + e: &etemplate + b: 1 + c: 2 + f: *etemplate + """ + ) + compare( + data, + """ + a: &id001 + b: 1 + c: 2 + d: *id001 + e: &etemplate + b: 1 + c: 2 + f: *etemplate + """, + ) + + @pytest.mark.skipif( + platform.python_implementation() == 'Jython', + reason='Jython throws RepresenterError', + ) + def test_alias_before_anchor(self): + from ruyaml.composer import ComposerError + + with pytest.raises(ComposerError): + data = load( + """ + d: *id002 + a: &id002 + b: 1 + c: 2 + """ + ) + data = data + + def test_anchor_on_sequence(self): + # as reported by Bjorn Stabell + # https://bitbucket.org/ruyaml/issue/7/anchor-names-not-preserved + from ruyaml.comments import CommentedSeq + + data = load( + """ + nut1: &alice + - 1 + - 2 + nut2: &blake + - some data + - *alice + nut3: + - *blake + - *alice + """ + ) + r = data['nut1'] + assert isinstance(r, CommentedSeq) + assert r.yaml_anchor() is not None + assert r.yaml_anchor().value == 'alice' + + merge_yaml = dedent( + """ + - &CENTER {x: 1, y: 2} + - &LEFT {x: 0, y: 2} + - &BIG {r: 10} + - &SMALL {r: 1} + # All the following maps are equal: + # Explicit keys + - x: 1 + y: 2 + r: 10 + label: center/small + # Merge one map + - <<: *CENTER + r: 10 + label: center/medium + # Merge multiple maps + - <<: [*CENTER, *BIG] + label: center/big + # Override + - <<: [*BIG, *LEFT, *SMALL] + x: 1 + label: center/huge + """ + ) + + def test_merge_00(self): + data = load(self.merge_yaml) + d = data[4] + ok = True + for k in d: + for o in [5, 6, 7]: + x = d.get(k) + y = data[o].get(k) + if not isinstance(x, int): + x = x.split('/')[0] + y = y.split('/')[0] + if x != y: + ok = False + print('key', k, d.get(k), 
data[o].get(k)) + assert ok + + def test_merge_accessible(self): + from ruyaml.comments import CommentedMap, merge_attrib + + data = load( + """ + k: &level_2 { a: 1, b2 } + l: &level_1 { a: 10, c: 3 } + m: + <<: *level_1 + c: 30 + d: 40 + """ + ) + d = data['m'] + assert isinstance(d, CommentedMap) + assert hasattr(d, merge_attrib) + + def test_merge_01(self): + data = load(self.merge_yaml) + compare(data, self.merge_yaml) + + def test_merge_nested(self): + yaml = """ + a: + <<: &content + 1: plugh + 2: plover + 0: xyzzy + b: + <<: *content + """ + data = round_trip(yaml) # NOQA + + def test_merge_nested_with_sequence(self): + yaml = """ + a: + <<: &content + <<: &y2 + 1: plugh + 2: plover + 0: xyzzy + b: + <<: [*content, *y2] + """ + data = round_trip(yaml) # NOQA + + def test_add_anchor(self): + from ruyaml.comments import CommentedMap + + data = CommentedMap() + data_a = CommentedMap() + data['a'] = data_a + data_a['c'] = 3 + data['b'] = 2 + data.yaml_set_anchor('klm', always_dump=True) + data['a'].yaml_set_anchor('xyz', always_dump=True) + compare( + data, + """ + &klm + a: &xyz + c: 3 + b: 2 + """, + ) + + # this is an error in PyYAML + def test_reused_anchor(self): + from ruyaml.error import ReusedAnchorWarning + + yaml = """ + - &a + x: 1 + - <<: *a + - &a + x: 2 + - <<: *a + """ + with pytest.warns(ReusedAnchorWarning): + data = round_trip(yaml) # NOQA + + def test_issue_130(self): + # issue 130 reported by Devid Fee + import ruyaml + + ys = dedent( + """\ + components: + server: &server_component + type: spark.server:ServerComponent + host: 0.0.0.0 + port: 8000 + shell: &shell_component + type: spark.shell:ShellComponent + + services: + server: &server_service + <<: *server_component + shell: &shell_service + <<: *shell_component + components: + server: {<<: *server_service} + """ + ) + yaml = ruyaml.YAML(typ='safe', pure=True) + data = yaml.load(ys) + assert data['services']['shell']['components']['server']['port'] == 8000 + + def test_issue_130a(self): 
+ # issue 130 reported by Devid Fee + import ruyaml + + ys = dedent( + """\ + components: + server: &server_component + type: spark.server:ServerComponent + host: 0.0.0.0 + port: 8000 + shell: &shell_component + type: spark.shell:ShellComponent + + services: + server: &server_service + <<: *server_component + port: 4000 + shell: &shell_service + <<: *shell_component + components: + server: {<<: *server_service} + """ + ) + yaml = ruyaml.YAML(typ='safe', pure=True) + data = yaml.load(ys) + assert data['services']['shell']['components']['server']['port'] == 4000 + + +class TestMergeKeysValues: + + yaml_str = dedent( + """\ + - &mx + a: x1 + b: x2 + c: x3 + - &my + a: y1 + b: y2 # masked by the one in &mx + d: y4 + - + a: 1 + <<: [*mx, *my] + m: 6 + """ + ) + + # in the following d always has "expanded" the merges + + def test_merge_for(self): + from ruyaml import YAML + + d = YAML(typ='safe', pure=True).load(self.yaml_str) + data = round_trip_load(self.yaml_str) + count = 0 + for x in data[2]: + count += 1 + print(count, x) + assert count == len(d[2]) + + def test_merge_keys(self): + from ruyaml import YAML + + d = YAML(typ='safe', pure=True).load(self.yaml_str) + data = round_trip_load(self.yaml_str) + count = 0 + for x in data[2].keys(): + count += 1 + print(count, x) + assert count == len(d[2]) + + def test_merge_values(self): + from ruyaml import YAML + + d = YAML(typ='safe', pure=True).load(self.yaml_str) + data = round_trip_load(self.yaml_str) + count = 0 + for x in data[2].values(): + count += 1 + print(count, x) + assert count == len(d[2]) + + def test_merge_items(self): + from ruyaml import YAML + + d = YAML(typ='safe', pure=True).load(self.yaml_str) + data = round_trip_load(self.yaml_str) + count = 0 + for x in data[2].items(): + count += 1 + print(count, x) + assert count == len(d[2]) + + def test_len_items_delete(self): + from ruyaml import YAML + + d = YAML(typ='safe', pure=True).load(self.yaml_str) + data = round_trip_load(self.yaml_str) + x = 
data[2].items() + print('d2 items', d[2].items(), len(d[2].items()), x, len(x)) + ref = len(d[2].items()) + print('ref', ref) + assert len(x) == ref + del data[2]['m'] + ref -= 1 + assert len(x) == ref + del data[2]['d'] + ref -= 1 + assert len(x) == ref + del data[2]['a'] + ref -= 1 + assert len(x) == ref + + def test_issue_196_cast_of_dict(self, capsys): + from ruyaml import YAML + + yaml = YAML() + mapping = yaml.load( + """\ + anchored: &anchor + a : 1 + + mapping: + <<: *anchor + b: 2 + """ + )['mapping'] + + for k in mapping: + print('k', k) + for k in mapping.copy(): + print('kc', k) + + print('v', list(mapping.keys())) + print('v', list(mapping.values())) + print('v', list(mapping.items())) + print(len(mapping)) + print('-----') + + # print({**mapping}) + # print(type({**mapping})) + # assert 'a' in {**mapping} + assert 'a' in mapping + x = {} + for k in mapping: + x[k] = mapping[k] + assert 'a' in x + assert 'a' in mapping.keys() + assert mapping['a'] == 1 + assert mapping.__getitem__('a') == 1 + assert 'a' in dict(mapping) + assert 'a' in dict(mapping.items()) + + def test_values_of_merged(self): + from ruyaml import YAML + + yaml = YAML() + data = yaml.load(dedent(self.yaml_str)) + assert list(data[2].values()) == [1, 6, 'x2', 'x3', 'y4'] + + def test_issue_213_copy_of_merge(self): + from ruyaml import YAML + + yaml = YAML() + d = yaml.load( + """\ + foo: &foo + a: a + foo2: + <<: *foo + b: b + """ + )['foo2'] + assert d['a'] == 'a' + d2 = d.copy() + assert d2['a'] == 'a' + print('d', d) + del d['a'] + assert 'a' not in d + assert 'a' in d2 + + def test_dup_merge(self): + from ruyaml import YAML + + yaml = YAML() + yaml.allow_duplicate_keys = True + d = yaml.load( + """\ + foo: &f + a: a + foo2: &g + b: b + all: + <<: *f + <<: *g + """ + )['all'] + assert d == {'a': 'a', 'b': 'b'} + + def test_dup_merge_fail(self): + from ruyaml import YAML + from ruyaml.constructor import DuplicateKeyError + + yaml = YAML() + yaml.allow_duplicate_keys = False + with 
pytest.raises(DuplicateKeyError): + yaml.load( + """\ + foo: &f + a: a + foo2: &g + b: b + all: + <<: *f + <<: *g + """ + ) + + +class TestDuplicateKeyThroughAnchor: + def test_duplicate_key_00(self): + from ruyaml import YAML, version_info + from ruyaml.constructor import DuplicateKeyError, DuplicateKeyFutureWarning + + s = dedent( + """\ + &anchor foo: + foo: bar + *anchor : duplicate key + baz: bat + *anchor : duplicate key + """ + ) + if version_info < (0, 15, 1): + pass + elif version_info < (0, 16, 0): + with pytest.warns(DuplicateKeyFutureWarning): + YAML(typ='safe', pure=True).load(s) + with pytest.warns(DuplicateKeyFutureWarning): + YAML(typ='rt').load(s) + else: + with pytest.raises(DuplicateKeyError): + YAML(typ='safe', pure=True).load(s) + with pytest.raises(DuplicateKeyError): + YAML(typ='rt').load(s) + + def test_duplicate_key_01(self): + # so issue https://stackoverflow.com/a/52852106/1307905 + from ruyaml.constructor import DuplicateKeyError + + s = dedent( + """\ + - &name-name + a: 1 + - &help-name + b: 2 + - <<: *name-name + <<: *help-name + """ + ) + with pytest.raises(DuplicateKeyError): + yaml = YAML(typ='safe') + yaml.load(s) + with pytest.raises(DuplicateKeyError): + yaml = YAML() + yaml.load(s) + + +class TestFullCharSetAnchors: + def test_master_of_orion(self): + # https://bitbucket.org/ruyaml/issues/72/not-allowed-in-anchor-names + # submitted by Shalon Wood + yaml_str = """ + - collection: &Backend.Civilizations.RacialPerk + items: + - key: perk_population_growth_modifier + - *Backend.Civilizations.RacialPerk + """ + data = load(yaml_str) # NOQA + + def test_roundtrip_00(self): + yaml_str = """ + - &dotted.words.here + a: 1 + b: 2 + - *dotted.words.here + """ + data = round_trip(yaml_str) # NOQA + + def test_roundtrip_01(self): + yaml_str = """ + - &dotted.words.here[a, b] + - *dotted.words.here + """ + data = load(yaml_str) # NOQA + compare(data, yaml_str.replace('[', ' [')) # an extra space is inserted diff --git 
a/_test/test_api_change.py b/_test/test_api_change.py new file mode 100644 index 0000000..dd25fd9 --- /dev/null +++ b/_test/test_api_change.py @@ -0,0 +1,230 @@ +# coding: utf-8 + +""" +testing of anchors and the aliases referring to them +""" + +import sys +import textwrap +from pathlib import Path + +import pytest + + +class TestNewAPI: + def test_duplicate_keys_00(self): + from ruyaml import YAML + from ruyaml.constructor import DuplicateKeyError + + yaml = YAML() + with pytest.raises(DuplicateKeyError): + yaml.load('{a: 1, a: 2}') + + def test_duplicate_keys_01(self): + from ruyaml import YAML + from ruyaml.constructor import DuplicateKeyError + + yaml = YAML(typ='safe', pure=True) + with pytest.raises(DuplicateKeyError): + yaml.load('{a: 1, a: 2}') + + def test_duplicate_keys_02(self): + from ruyaml import YAML + from ruyaml.constructor import DuplicateKeyError + + yaml = YAML(typ='safe') + with pytest.raises(DuplicateKeyError): + yaml.load('{a: 1, a: 2}') + + def test_issue_135(self): + # reported by Andrzej Ostrowski + from ruyaml import YAML + + data = {'a': 1, 'b': 2} + yaml = YAML(typ='safe') + # originally on 2.7: with pytest.raises(TypeError): + yaml.dump(data, sys.stdout) + + def test_issue_135_temporary_workaround(self): + # never raised error + from ruyaml import YAML + + data = {'a': 1, 'b': 2} + yaml = YAML(typ='safe', pure=True) + yaml.dump(data, sys.stdout) + + +class TestWrite: + def test_dump_path(self, tmpdir): + from ruyaml import YAML + + fn = Path(str(tmpdir)) / 'test.yaml' + yaml = YAML() + data = yaml.map() + data['a'] = 1 + data['b'] = 2 + yaml.dump(data, fn) + assert fn.read_text() == 'a: 1\nb: 2\n' + + def test_dump_file(self, tmpdir): + from ruyaml import YAML + + fn = Path(str(tmpdir)) / 'test.yaml' + yaml = YAML() + data = yaml.map() + data['a'] = 1 + data['b'] = 2 + with open(str(fn), 'w') as fp: + yaml.dump(data, fp) + assert fn.read_text() == 'a: 1\nb: 2\n' + + def test_dump_missing_stream(self): + from ruyaml import YAML + + 
yaml = YAML() + data = yaml.map() + data['a'] = 1 + data['b'] = 2 + with pytest.raises(TypeError): + yaml.dump(data) + + def test_dump_too_many_args(self, tmpdir): + from ruyaml import YAML + + fn = Path(str(tmpdir)) / 'test.yaml' + yaml = YAML() + data = yaml.map() + data['a'] = 1 + data['b'] = 2 + with pytest.raises(TypeError): + yaml.dump(data, fn, True) + + def test_transform(self, tmpdir): + from ruyaml import YAML + + def tr(s): + return s.replace(' ', ' ') + + fn = Path(str(tmpdir)) / 'test.yaml' + yaml = YAML() + data = yaml.map() + data['a'] = 1 + data['b'] = 2 + yaml.dump(data, fn, transform=tr) + assert fn.read_text() == 'a: 1\nb: 2\n' + + def test_print(self, capsys): + from ruyaml import YAML + + yaml = YAML() + data = yaml.map() + data['a'] = 1 + data['b'] = 2 + yaml.dump(data, sys.stdout) + out, err = capsys.readouterr() + assert out == 'a: 1\nb: 2\n' + + +class TestRead: + def test_multi_load(self): + # make sure reader, scanner, parser get reset + from ruyaml import YAML + + yaml = YAML() + yaml.load('a: 1') + yaml.load('a: 1') # did not work in 0.15.4 + + def test_parse(self): + # ensure `parse` method is functional and can parse "unsafe" yaml + from ruyaml import YAML + from ruyaml.constructor import ConstructorError + + yaml = YAML(typ='safe') + s = '- !User0 {age: 18, name: Anthon}' + # should fail to load + with pytest.raises(ConstructorError): + yaml.load(s) + # should parse fine + yaml = YAML(typ='safe') + for _ in yaml.parse(s): + pass + + +class TestLoadAll: + def test_multi_document_load(self, tmpdir): + """this went wrong on 3.7 because of StopIteration, PR 37 and Issue 211""" + from ruyaml import YAML + + fn = Path(str(tmpdir)) / 'test.yaml' + fn.write_text( + textwrap.dedent( + """\ + --- + - a + --- + - b + ... 
+ """ + ) + ) + yaml = YAML() + assert list(yaml.load_all(fn)) == [['a'], ['b']] + + +class TestDuplSet: + def test_dupl_set_00(self): + # round-trip-loader should except + from ruyaml import YAML + from ruyaml.constructor import DuplicateKeyError + + yaml = YAML() + with pytest.raises(DuplicateKeyError): + yaml.load( + textwrap.dedent( + """\ + !!set + ? a + ? b + ? c + ? a + """ + ) + ) + + +class TestDumpLoadUnicode: + # test triggered by SamH on stackoverflow (https://stackoverflow.com/q/45281596/1307905) + # and answer by randomir (https://stackoverflow.com/a/45281922/1307905) + def test_write_unicode(self, tmpdir): + from ruyaml import YAML + + yaml = YAML() + text_dict = {'text': 'HELLO_WORLD©'} + file_name = str(tmpdir) + '/tstFile.yaml' + yaml.dump(text_dict, open(file_name, 'w')) + assert open(file_name, 'rb').read().decode('utf-8') == 'text: HELLO_WORLD©\n' + + def test_read_unicode(self, tmpdir): + from ruyaml import YAML + + yaml = YAML() + file_name = str(tmpdir) + '/tstFile.yaml' + with open(file_name, 'wb') as fp: + fp.write('text: HELLO_WORLD©\n'.encode('utf-8')) + text_dict = yaml.load(open(file_name, 'r')) + assert text_dict['text'] == 'HELLO_WORLD©' + + +class TestFlowStyle: + def test_flow_style(self, capsys): + # https://stackoverflow.com/questions/45791712/ + from ruyaml import YAML + + yaml = YAML() + yaml.default_flow_style = None + data = yaml.map() + data['b'] = 1 + data['a'] = [[1, 2], [3, 4]] + yaml.dump(data, sys.stdout) + out, err = capsys.readouterr() + assert out == 'b: 1\na:\n- [1, 2]\n- [3, 4]\n' diff --git a/_test/test_class_register.py b/_test/test_class_register.py new file mode 100644 index 0000000..54c2191 --- /dev/null +++ b/_test/test_class_register.py @@ -0,0 +1,141 @@ +# coding: utf-8 + +""" +testing of YAML.register_class and @yaml_object +""" + +from .roundtrip import YAML + + +class User0: + def __init__(self, name, age): + self.name = name + self.age = age + + +class User1(object): + yaml_tag = '!user' + + def 
__init__(self, name, age): + self.name = name + self.age = age + + @classmethod + def to_yaml(cls, representer, node): + return representer.represent_scalar( + cls.yaml_tag, '{.name}-{.age}'.format(node, node) + ) + + @classmethod + def from_yaml(cls, constructor, node): + return cls(*node.value.split('-')) + + +class TestRegisterClass: + def test_register_0_rt(self): + yaml = YAML() + yaml.register_class(User0) + ys = """ + - !User0 + name: Anthon + age: 18 + """ + d = yaml.load(ys) + yaml.dump(d, compare=ys, unordered_lines=True) + + def test_register_0_safe(self): + # default_flow_style = None + yaml = YAML(typ='safe') + yaml.register_class(User0) + ys = """ + - !User0 {age: 18, name: Anthon} + """ + d = yaml.load(ys) + yaml.dump(d, compare=ys) + + def test_register_0_unsafe(self): + # default_flow_style = None + yaml = YAML(typ='unsafe') + yaml.register_class(User0) + ys = """ + - !User0 {age: 18, name: Anthon} + """ + d = yaml.load(ys) + yaml.dump(d, compare=ys) + + def test_register_1_rt(self): + yaml = YAML() + yaml.register_class(User1) + ys = """ + - !user Anthon-18 + """ + d = yaml.load(ys) + yaml.dump(d, compare=ys) + + def test_register_1_safe(self): + yaml = YAML(typ='safe') + yaml.register_class(User1) + ys = """ + [!user Anthon-18] + """ + d = yaml.load(ys) + yaml.dump(d, compare=ys) + + def test_register_1_unsafe(self): + yaml = YAML(typ='unsafe') + yaml.register_class(User1) + ys = """ + [!user Anthon-18] + """ + d = yaml.load(ys) + yaml.dump(d, compare=ys) + + +class TestDecorator: + def test_decorator_implicit(self): + from ruyaml import yaml_object + + yml = YAML() + + @yaml_object(yml) + class User2: + def __init__(self, name, age): + self.name = name + self.age = age + + ys = """ + - !User2 + name: Anthon + age: 18 + """ + d = yml.load(ys) + yml.dump(d, compare=ys, unordered_lines=True) + + def test_decorator_explicit(self): + from ruyaml import yaml_object + + yml = YAML() + + @yaml_object(yml) + class User3(object): + yaml_tag = '!USER' + + 
def __init__(self, name, age): + self.name = name + self.age = age + + @classmethod + def to_yaml(cls, representer, node): + return representer.represent_scalar( + cls.yaml_tag, '{.name}-{.age}'.format(node, node) + ) + + @classmethod + def from_yaml(cls, constructor, node): + return cls(*node.value.split('-')) + + ys = """ + - !USER Anthon-18 + """ + d = yml.load(ys) + yml.dump(d, compare=ys) diff --git a/_test/test_collections.py b/_test/test_collections.py new file mode 100644 index 0000000..579e30f --- /dev/null +++ b/_test/test_collections.py @@ -0,0 +1,19 @@ +# coding: utf-8 + +""" +collections.OrderedDict is a new class not supported by PyYAML (issue 83 by Frazer McLean) + +This is now so integrated in Python that it can be mapped to !!omap + +""" + +import pytest # NOQA + +from .roundtrip import dedent, round_trip, round_trip_dump, round_trip_load # NOQA + + +class TestOrderedDict: + def test_ordereddict(self): + from collections import OrderedDict + + assert round_trip_dump(OrderedDict()) == '!!omap []\n' diff --git a/_test/test_comment_manipulation.py b/_test/test_comment_manipulation.py new file mode 100644 index 0000000..39fde99 --- /dev/null +++ b/_test/test_comment_manipulation.py @@ -0,0 +1,721 @@ +# coding: utf-8 + +import pytest # NOQA + +from .roundtrip import dedent, round_trip, round_trip_dump, round_trip_load # NOQA + + +def load(s): + return round_trip_load(dedent(s)) + + +def compare(data, s, **kw): + assert round_trip_dump(data, **kw) == dedent(s) + + +def compare_eol(data, s): + assert 'EOL' in s + ds = dedent(s).replace('EOL', '').replace('\n', '|\n') + assert round_trip_dump(data).replace('\n', '|\n') == ds + + +class TestCommentsManipulation: + + # list + def test_seq_set_comment_on_existing_explicit_column(self): + data = load( + """ + - a # comment 1 + - b + - c + """ + ) + data.yaml_add_eol_comment('comment 2', key=1, column=6) + exp = """ + - a # comment 1 + - b # comment 2 + - c + """ + compare(data, exp) + + def 
test_seq_overwrite_comment_on_existing_explicit_column(self): + data = load( + """ + - a # comment 1 + - b + - c + """ + ) + data.yaml_add_eol_comment('comment 2', key=0, column=6) + exp = """ + - a # comment 2 + - b + - c + """ + compare(data, exp) + + def test_seq_first_comment_explicit_column(self): + data = load( + """ + - a + - b + - c + """ + ) + data.yaml_add_eol_comment('comment 1', key=1, column=6) + exp = """ + - a + - b # comment 1 + - c + """ + compare(data, exp) + + def test_seq_set_comment_on_existing_column_prev(self): + data = load( + """ + - a # comment 1 + - b + - c + - d # comment 3 + """ + ) + data.yaml_add_eol_comment('comment 2', key=1) + exp = """ + - a # comment 1 + - b # comment 2 + - c + - d # comment 3 + """ + compare(data, exp) + + def test_seq_set_comment_on_existing_column_next(self): + data = load( + """ + - a # comment 1 + - b + - c + - d # comment 3 + """ + ) + print(data._yaml_comment) + # print(type(data._yaml_comment._items[0][0].start_mark)) + # ruyaml.error.Mark + # print(type(data._yaml_comment._items[0][0].start_mark)) + data.yaml_add_eol_comment('comment 2', key=2) + exp = """ + - a # comment 1 + - b + - c # comment 2 + - d # comment 3 + """ + compare(data, exp) + + def test_seq_set_comment_on_existing_column_further_away(self): + """ + no comment line before or after, take the latest before + the new position + """ + data = load( + """ + - a # comment 1 + - b + - c + - d + - e + - f # comment 3 + """ + ) + print(data._yaml_comment) + # print(type(data._yaml_comment._items[0][0].start_mark)) + # ruyaml.error.Mark + # print(type(data._yaml_comment._items[0][0].start_mark)) + data.yaml_add_eol_comment('comment 2', key=3) + exp = """ + - a # comment 1 + - b + - c + - d # comment 2 + - e + - f # comment 3 + """ + compare(data, exp) + + def test_seq_set_comment_on_existing_explicit_column_with_hash(self): + data = load( + """ + - a # comment 1 + - b + - c + """ + ) + data.yaml_add_eol_comment('# comment 2', key=1, column=6) + exp 
= """ + - a # comment 1 + - b # comment 2 + - c + """ + compare(data, exp) + + # dict + + def test_dict_set_comment_on_existing_explicit_column(self): + data = load( + """ + a: 1 # comment 1 + b: 2 + c: 3 + d: 4 + e: 5 + """ + ) + data.yaml_add_eol_comment('comment 2', key='c', column=7) + exp = """ + a: 1 # comment 1 + b: 2 + c: 3 # comment 2 + d: 4 + e: 5 + """ + compare(data, exp) + + def test_dict_overwrite_comment_on_existing_explicit_column(self): + data = load( + """ + a: 1 # comment 1 + b: 2 + c: 3 + d: 4 + e: 5 + """ + ) + data.yaml_add_eol_comment('comment 2', key='a', column=7) + exp = """ + a: 1 # comment 2 + b: 2 + c: 3 + d: 4 + e: 5 + """ + compare(data, exp) + + def test_map_set_comment_on_existing_column_prev(self): + data = load( + """ + a: 1 # comment 1 + b: 2 + c: 3 + d: 4 + e: 5 # comment 3 + """ + ) + data.yaml_add_eol_comment('comment 2', key='b') + exp = """ + a: 1 # comment 1 + b: 2 # comment 2 + c: 3 + d: 4 + e: 5 # comment 3 + """ + compare(data, exp) + + def test_map_set_comment_on_existing_column_next(self): + data = load( + """ + a: 1 # comment 1 + b: 2 + c: 3 + d: 4 + e: 5 # comment 3 + """ + ) + data.yaml_add_eol_comment('comment 2', key='d') + exp = """ + a: 1 # comment 1 + b: 2 + c: 3 + d: 4 # comment 2 + e: 5 # comment 3 + """ + compare(data, exp) + + def test_map_set_comment_on_existing_column_further_away(self): + """ + no comment line before or after, take the latest before + the new position + """ + data = load( + """ + a: 1 # comment 1 + b: 2 + c: 3 + d: 4 + e: 5 # comment 3 + """ + ) + data.yaml_add_eol_comment('comment 2', key='c') + print(round_trip_dump(data)) + exp = """ + a: 1 # comment 1 + b: 2 + c: 3 # comment 2 + d: 4 + e: 5 # comment 3 + """ + compare(data, exp) + + def test_before_top_map_rt(self): + data = load( + """ + a: 1 + b: 2 + """ + ) + data.yaml_set_start_comment('Hello\nWorld\n') + exp = """ + # Hello + # World + a: 1 + b: 2 + """ + compare(data, exp.format(comment='#')) + + def 
test_before_top_map_replace(self): + data = load( + """ + # abc + # def + a: 1 # 1 + b: 2 + """ + ) + data.yaml_set_start_comment('Hello\nWorld\n') + exp = """ + # Hello + # World + a: 1 # 1 + b: 2 + """ + compare(data, exp.format(comment='#')) + + def test_before_top_map_from_scratch(self): + from ruyaml.comments import CommentedMap + + data = CommentedMap() + data['a'] = 1 + data['b'] = 2 + data.yaml_set_start_comment('Hello\nWorld\n') + # print(data.ca) + # print(data.ca._items) + exp = """ + # Hello + # World + a: 1 + b: 2 + """ + compare(data, exp.format(comment='#')) + + def test_before_top_seq_rt(self): + data = load( + """ + - a + - b + """ + ) + data.yaml_set_start_comment('Hello\nWorld\n') + print(round_trip_dump(data)) + exp = """ + # Hello + # World + - a + - b + """ + compare(data, exp) + + def test_before_top_seq_rt_replace(self): + s = """ + # this + # that + - a + - b + """ + data = load(s.format(comment='#')) + data.yaml_set_start_comment('Hello\nWorld\n') + print(round_trip_dump(data)) + exp = """ + # Hello + # World + - a + - b + """ + compare(data, exp.format(comment='#')) + + def test_before_top_seq_from_scratch(self): + from ruyaml.comments import CommentedSeq + + data = CommentedSeq() + data.append('a') + data.append('b') + data.yaml_set_start_comment('Hello\nWorld\n') + print(round_trip_dump(data)) + exp = """ + # Hello + # World + - a + - b + """ + compare(data, exp.format(comment='#')) + + # nested variants + def test_before_nested_map_rt(self): + data = load( + """ + a: 1 + b: + c: 2 + d: 3 + """ + ) + data['b'].yaml_set_start_comment('Hello\nWorld\n') + exp = """ + a: 1 + b: + # Hello + # World + c: 2 + d: 3 + """ + compare(data, exp.format(comment='#')) + + def test_before_nested_map_rt_indent(self): + data = load( + """ + a: 1 + b: + c: 2 + d: 3 + """ + ) + data['b'].yaml_set_start_comment('Hello\nWorld\n', indent=2) + exp = """ + a: 1 + b: + # Hello + # World + c: 2 + d: 3 + """ + compare(data, exp.format(comment='#')) + 
print(data['b'].ca) + + def test_before_nested_map_from_scratch(self): + from ruyaml.comments import CommentedMap + + data = CommentedMap() + datab = CommentedMap() + data['a'] = 1 + data['b'] = datab + datab['c'] = 2 + datab['d'] = 3 + data['b'].yaml_set_start_comment('Hello\nWorld\n') + exp = """ + a: 1 + b: + # Hello + # World + c: 2 + d: 3 + """ + compare(data, exp.format(comment='#')) + + def test_before_nested_seq_from_scratch(self): + from ruyaml.comments import CommentedMap, CommentedSeq + + data = CommentedMap() + datab = CommentedSeq() + data['a'] = 1 + data['b'] = datab + datab.append('c') + datab.append('d') + data['b'].yaml_set_start_comment('Hello\nWorld\n', indent=2) + exp = """ + a: 1 + b: + # Hello + # World + - c + - d + """ + compare(data, exp.format(comment='#')) + + def test_before_nested_seq_from_scratch_block_seq_indent(self): + from ruyaml.comments import CommentedMap, CommentedSeq + + data = CommentedMap() + datab = CommentedSeq() + data['a'] = 1 + data['b'] = datab + datab.append('c') + datab.append('d') + data['b'].yaml_set_start_comment('Hello\nWorld\n', indent=2) + exp = """ + a: 1 + b: + # Hello + # World + - c + - d + """ + compare(data, exp.format(comment='#'), indent=4, block_seq_indent=2) + + def test_map_set_comment_before_and_after_non_first_key_00(self): + # http://stackoverflow.com/a/40705671/1307905 + data = load( + """ + xyz: + a: 1 # comment 1 + b: 2 + + test1: + test2: + test3: 3 + """ + ) + data.yaml_set_comment_before_after_key( + 'test1', 'before test1 (top level)', after='before test2' + ) + data['test1']['test2'].yaml_set_start_comment('after test2', indent=4) + exp = """ + xyz: + a: 1 # comment 1 + b: 2 + + # before test1 (top level) + test1: + # before test2 + test2: + # after test2 + test3: 3 + """ + compare(data, exp) + + def Xtest_map_set_comment_before_and_after_non_first_key_01(self): + data = load( + """ + xyz: + a: 1 # comment 1 + b: 2 + + test1: + test2: + test3: 3 + """ + ) + 
data.yaml_set_comment_before_after_key( + 'test1', 'before test1 (top level)', after='before test2\n\n' + ) + data['test1']['test2'].yaml_set_start_comment('after test2', indent=4) + # EOL is needed here as dedenting gets rid of spaces (as well as does Emacs + exp = """ + xyz: + a: 1 # comment 1 + b: 2 + + # before test1 (top level) + test1: + # before test2 + EOL + test2: + # after test2 + test3: 3 + """ + compare_eol(data, exp) + + # EOL is no longer necessary + # fixed together with issue # 216 + def test_map_set_comment_before_and_after_non_first_key_01(self): + data = load( + """ + xyz: + a: 1 # comment 1 + b: 2 + + test1: + test2: + test3: 3 + """ + ) + data.yaml_set_comment_before_after_key( + 'test1', 'before test1 (top level)', after='before test2\n\n' + ) + data['test1']['test2'].yaml_set_start_comment('after test2', indent=4) + exp = """ + xyz: + a: 1 # comment 1 + b: 2 + + # before test1 (top level) + test1: + # before test2 + + test2: + # after test2 + test3: 3 + """ + compare(data, exp) + + def Xtest_map_set_comment_before_and_after_non_first_key_02(self): + data = load( + """ + xyz: + a: 1 # comment 1 + b: 2 + + test1: + test2: + test3: 3 + """ + ) + data.yaml_set_comment_before_after_key( + 'test1', + 'xyz\n\nbefore test1 (top level)', + after='\nbefore test2', + after_indent=4, + ) + data['test1']['test2'].yaml_set_start_comment('after test2', indent=4) + # EOL is needed here as dedenting gets rid of spaces (as well as does Emacs + exp = """ + xyz: + a: 1 # comment 1 + b: 2 + + # xyz + + # before test1 (top level) + test1: + EOL + # before test2 + test2: + # after test2 + test3: 3 + """ + compare_eol(data, exp) + + def test_map_set_comment_before_and_after_non_first_key_02(self): + data = load( + """ + xyz: + a: 1 # comment 1 + b: 2 + + test1: + test2: + test3: 3 + """ + ) + data.yaml_set_comment_before_after_key( + 'test1', + 'xyz\n\nbefore test1 (top level)', + after='\nbefore test2', + after_indent=4, + ) + 
data['test1']['test2'].yaml_set_start_comment('after test2', indent=4) + exp = """ + xyz: + a: 1 # comment 1 + b: 2 + + # xyz + + # before test1 (top level) + test1: + + # before test2 + test2: + # after test2 + test3: 3 + """ + compare(data, exp) + + # issue 32 + def test_yaml_add_eol_comment_issue_32(self): + data = load( + """ + items: + - one: 1 + uno: '1' + - # item 2 + two: 2 + duo: '2' + - three: 3 + """ + ) + + data['items'].yaml_add_eol_comment('second pass', key=1) + + exp = """ + items: + - one: 1 + uno: '1' + - # second pass + two: 2 + duo: '2' + - three: 3 + """ + + compare(data, exp) + + def test_yaml_add_eol_comment_issue_32_ok(self): + data = load( + """ + items: + - one + - two # item 2 + - three + """ + ) + + data['items'].yaml_add_eol_comment('second pass', key=1) + + exp = """ + items: + - one + - two # second pass + - three + """ + + compare(data, exp) + + # issue 33 + @pytest.mark.xfail(reason="open issue", raises=AttributeError) + def test_yaml_set_start_comment_issue_33(self): + data = load( + """ + items: + # item 1 + - one: 1 + uno: '1' + # item 2 + - two: 2 + duo: '2' + # item 3 + - three: 3 + """ + ) + + data['items'][0].yaml_set_start_comment('uno') + data['items'][1].yaml_set_start_comment('duo') + data['items'][2].yaml_set_start_comment('tre') + + exp = """ + items: + # uno + - one: 1 + uno: '1' + # duo + - two: 2 + duo: '2' + # tre + - three: 3 + """ + + compare(data, exp) diff --git a/_test/test_comments.py b/_test/test_comments.py new file mode 100644 index 0000000..7973349 --- /dev/null +++ b/_test/test_comments.py @@ -0,0 +1,964 @@ +# coding: utf-8 + +""" +comment testing is all about roundtrips +these can be done in the "old" way by creating a file.data and file.roundtrip +but there is little flexibility in doing that + +but some things are not easily tested, eog. 
how a +roundtrip changes + +""" + +import pytest + +from .roundtrip import dedent, round_trip, round_trip_dump, round_trip_load + + +class TestComments: + def test_no_end_of_file_eol(self): + """not excluding comments caused some problems if at the end of + the file without a newline. First error, then included \0""" + x = """\ + - europe: 10 # abc""" + round_trip(x, extra='\n') + with pytest.raises(AssertionError): + round_trip(x, extra='a\n') + + def test_no_comments(self): + round_trip( + """ + - europe: 10 + - usa: + - ohio: 2 + - california: 9 + """ + ) + + def test_round_trip_ordering(self): + round_trip( + """ + a: 1 + b: 2 + c: 3 + b1: 2 + b2: 2 + d: 4 + e: 5 + f: 6 + """ + ) + + def test_complex(self): + round_trip( + """ + - europe: 10 # top + - usa: + - ohio: 2 + - california: 9 # o + """ + ) + + def test_dropped(self): + s = """\ + # comment + scalar + ... + """ + round_trip(s, 'scalar\n...\n') + + def test_main_mapping_begin_end(self): + round_trip( + """ + # C start a + # C start b + abc: 1 + ghi: 2 + klm: 3 + # C end a + # C end b + """ + ) + + def test_reindent(self): + x = """\ + a: + b: # comment 1 + c: 1 # comment 2 + """ + d = round_trip_load(x) + y = round_trip_dump(d, indent=4) + assert y == dedent( + """\ + a: + b: # comment 1 + c: 1 # comment 2 + """ + ) + + def test_main_mapping_begin_end_items_post(self): + round_trip( + """ + # C start a + # C start b + abc: 1 # abc comment + ghi: 2 + klm: 3 # klm comment + # C end a + # C end b + """ + ) + + def test_main_sequence_begin_end(self): + round_trip( + """ + # C start a + # C start b + - abc + - ghi + - klm + # C end a + # C end b + """ + ) + + def test_main_sequence_begin_end_items_post(self): + round_trip( + """ + # C start a + # C start b + - abc # abc comment + - ghi + - klm # klm comment + # C end a + # C end b + """ + ) + + def test_main_mapping_begin_end_complex(self): + round_trip( + """ + # C start a + # C start b + abc: 1 + ghi: 2 + klm: + 3a: alpha + 3b: beta # it is all greek to me 
+ # C end a + # C end b + """ + ) + + def test_09(self): # 2.9 from the examples in the spec + s = """\ + hr: # 1998 hr ranking + - Mark McGwire + - Sammy Sosa + rbi: + # 1998 rbi ranking + - Sammy Sosa + - Ken Griffey + """ + round_trip(s, indent=4, block_seq_indent=2) + + def test_09a(self): + round_trip( + """ + hr: # 1998 hr ranking + - Mark McGwire + - Sammy Sosa + rbi: + # 1998 rbi ranking + - Sammy Sosa + - Ken Griffey + """ + ) + + def test_simple_map_middle_comment(self): + round_trip( + """ + abc: 1 + # C 3a + # C 3b + ghi: 2 + """ + ) + + def test_map_in_map_0(self): + round_trip( + """ + map1: # comment 1 + # comment 2 + map2: + key1: val1 + """ + ) + + def test_map_in_map_1(self): + # comment is moved from value to key + round_trip( + """ + map1: + # comment 1 + map2: + key1: val1 + """ + ) + + def test_application_arguments(self): + # application configur + round_trip( + """ + args: + username: anthon + passwd: secret + fullname: Anthon van der Neut + tmux: + session-name: test + loop: + wait: 10 + """ + ) + + def test_substitute(self): + x = """ + args: + username: anthon # name + passwd: secret # password + fullname: Anthon van der Neut + tmux: + session-name: test + loop: + wait: 10 + """ + data = round_trip_load(x) + data['args']['passwd'] = 'deleted password' + # note the requirement to add spaces for alignment of comment + x = x.replace(': secret ', ': deleted password') + assert round_trip_dump(data) == dedent(x) + + def test_set_comment(self): + round_trip( + """ + !!set + # the beginning + ? a + # next one is B (lowercase) + ? b # You see? Promised you. + ? 
c + # this is the end + """ + ) + + def test_omap_comment_roundtrip(self): + round_trip( + """ + !!omap + - a: 1 + - b: 2 # two + - c: 3 # three + - d: 4 + """ + ) + + def test_omap_comment_roundtrip_pre_comment(self): + round_trip( + """ + !!omap + - a: 1 + - b: 2 # two + - c: 3 # three + # last one + - d: 4 + """ + ) + + def test_non_ascii(self): + round_trip( + """ + verbosity: 1 # 0 is minimal output, -1 none + base_url: http://gopher.net + special_indices: [1, 5, 8] + also_special: + - a + - 19 + - 32 + asia and europe: &asia_europe + Turkey: Ankara + Russia: Moscow + countries: + Asia: + <<: *asia_europe + Japan: Tokyo # æ±äº¬ + Europe: + <<: *asia_europe + Spain: Madrid + Italy: Rome + """ + ) + + def test_dump_utf8(self): + import ruyaml # NOQA + + x = dedent( + """\ + ab: + - x # comment + - y # more comment + """ + ) + data = round_trip_load(x) + for utf in [True, False]: + y = round_trip_dump(data, default_flow_style=False, allow_unicode=utf) + assert y == x + + def test_dump_unicode_utf8(self): + import ruyaml # NOQA + + x = dedent( + """\ + ab: + - x # comment + - y # more comment + """ + ) + data = round_trip_load(x) + for utf in [True, False]: + y = round_trip_dump(data, default_flow_style=False, allow_unicode=utf) + assert y == x + + def test_mlget_00(self): + x = """\ + a: + - b: + c: 42 + - d: + f: 196 + e: + g: 3.14 + """ + d = round_trip_load(x) + assert d.mlget(['a', 1, 'd', 'f'], list_ok=True) == 196 + # with pytest.raises(AssertionError): + # d.mlget(['a', 1, 'd', 'f']) == 196 + + +class TestInsertPopList: + """list insertion is more complex than dict insertion, as you + need to move the values to subsequent keys on insert""" + + @property + def ins(self): + return """\ + ab: + - a # a + - b # b + - c + - d # d + + de: + - 1 + - 2 + """ + + def test_insert_0(self): + d = round_trip_load(self.ins) + d['ab'].insert(0, 'xyz') + y = round_trip_dump(d, indent=2) + assert y == dedent( + """\ + ab: + - xyz + - a # a + - b # b + - c + - d # d + + de: 
+ - 1 + - 2 + """ + ) + + def test_insert_1(self): + d = round_trip_load(self.ins) + d['ab'].insert(4, 'xyz') + y = round_trip_dump(d, indent=2) + assert y == dedent( + """\ + ab: + - a # a + - b # b + - c + - d # d + + - xyz + de: + - 1 + - 2 + """ + ) + + def test_insert_2(self): + d = round_trip_load(self.ins) + d['ab'].insert(1, 'xyz') + y = round_trip_dump(d, indent=2) + assert y == dedent( + """\ + ab: + - a # a + - xyz + - b # b + - c + - d # d + + de: + - 1 + - 2 + """ + ) + + def test_pop_0(self): + d = round_trip_load(self.ins) + d['ab'].pop(0) + y = round_trip_dump(d, indent=2) + print(y) + assert y == dedent( + """\ + ab: + - b # b + - c + - d # d + + de: + - 1 + - 2 + """ + ) + + def test_pop_1(self): + d = round_trip_load(self.ins) + d['ab'].pop(1) + y = round_trip_dump(d, indent=2) + print(y) + assert y == dedent( + """\ + ab: + - a # a + - c + - d # d + + de: + - 1 + - 2 + """ + ) + + def test_pop_2(self): + d = round_trip_load(self.ins) + d['ab'].pop(2) + y = round_trip_dump(d, indent=2) + print(y) + assert y == dedent( + """\ + ab: + - a # a + - b # b + - d # d + + de: + - 1 + - 2 + """ + ) + + def test_pop_3(self): + d = round_trip_load(self.ins) + d['ab'].pop(3) + y = round_trip_dump(d, indent=2) + print(y) + assert y == dedent( + """\ + ab: + - a # a + - b # b + - c + de: + - 1 + - 2 + """ + ) + + +# inspired by demux' question on stackoverflow +# http://stackoverflow.com/a/36970608/1307905 +class TestInsertInMapping: + @property + def ins(self): + return """\ + first_name: Art + occupation: Architect # This is an occupation comment + about: Art Vandelay is a fictional character that George invents... 
+ """ + + def test_insert_at_pos_1(self): + d = round_trip_load(self.ins) + d.insert(1, 'last name', 'Vandelay', comment='new key') + y = round_trip_dump(d) + print(y) + assert y == dedent( + """\ + first_name: Art + last name: Vandelay # new key + occupation: Architect # This is an occupation comment + about: Art Vandelay is a fictional character that George invents... + """ + ) + + def test_insert_at_pos_0(self): + d = round_trip_load(self.ins) + d.insert(0, 'last name', 'Vandelay', comment='new key') + y = round_trip_dump(d) + print(y) + assert y == dedent( + """\ + last name: Vandelay # new key + first_name: Art + occupation: Architect # This is an occupation comment + about: Art Vandelay is a fictional character that George invents... + """ + ) + + def test_insert_at_pos_3(self): + # much more simple if done with appending. + d = round_trip_load(self.ins) + d.insert(3, 'last name', 'Vandelay', comment='new key') + y = round_trip_dump(d) + print(y) + assert y == dedent( + """\ + first_name: Art + occupation: Architect # This is an occupation comment + about: Art Vandelay is a fictional character that George invents... 
+ last name: Vandelay # new key + """ + ) + + +class TestCommentedMapMerge: + def test_in_operator(self): + data = round_trip_load( + """ + x: &base + a: 1 + b: 2 + c: 3 + y: + <<: *base + k: 4 + l: 5 + """ + ) + assert data['x']['a'] == 1 + assert 'a' in data['x'] + assert data['y']['a'] == 1 + assert 'a' in data['y'] + + def test_issue_60(self): + data = round_trip_load( + """ + x: &base + a: 1 + y: + <<: *base + """ + ) + assert data['x']['a'] == 1 + assert data['y']['a'] == 1 + assert str(data['y']) == """ordereddict([('a', 1)])""" + + def test_issue_60_1(self): + data = round_trip_load( + """ + x: &base + a: 1 + y: + <<: *base + b: 2 + """ + ) + assert data['x']['a'] == 1 + assert data['y']['a'] == 1 + assert str(data['y']) == """ordereddict([('b', 2), ('a', 1)])""" + + +class TestEmptyLines: + # prompted by issue 46 from Alex Harvey + def test_issue_46(self): + yaml_str = dedent( + """\ + --- + # Please add key/value pairs in alphabetical order + + aws_s3_bucket: 'mys3bucket' + + jenkins_ad_credentials: + bind_name: 'CN=svc-AAA-BBB-T,OU=Example,DC=COM,DC=EXAMPLE,DC=Local' + bind_pass: 'xxxxyyyy{' + """ + ) + d = round_trip_load(yaml_str, preserve_quotes=True) + y = round_trip_dump(d, explicit_start=True) + assert yaml_str == y + + def test_multispace_map(self): + round_trip( + """ + a: 1x + + b: 2x + + + c: 3x + + + + d: 4x + + """ + ) + + @pytest.mark.xfail(strict=True) + def test_multispace_map_initial(self): + round_trip( + """ + + a: 1x + + b: 2x + + + c: 3x + + + + d: 4x + + """ + ) + + def test_embedded_map(self): + round_trip( + """ + - a: 1y + b: 2y + + c: 3y + """ + ) + + def test_toplevel_seq(self): + round_trip( + """\ + - 1 + + - 2 + + - 3 + """ + ) + + def test_embedded_seq(self): + round_trip( + """ + a: + b: + - 1 + + - 2 + + + - 3 + """ + ) + + def test_line_with_only_spaces(self): + # issue 54 + yaml_str = "---\n\na: 'x'\n \nb: y\n" + d = round_trip_load(yaml_str, preserve_quotes=True) + y = round_trip_dump(d, explicit_start=True) + stripped 
= "" + for line in yaml_str.splitlines(): + stripped += line.rstrip() + '\n' + print(line + '$') + assert stripped == y + + def test_some_eol_spaces(self): + # spaces after tokens and on empty lines + yaml_str = '--- \n \na: "x" \n \nb: y \n' + d = round_trip_load(yaml_str, preserve_quotes=True) + y = round_trip_dump(d, explicit_start=True) + stripped = "" + for line in yaml_str.splitlines(): + stripped += line.rstrip() + '\n' + print(line + '$') + assert stripped == y + + def test_issue_54_not_ok(self): + yaml_str = dedent( + """\ + toplevel: + + # some comment + sublevel: 300 + """ + ) + d = round_trip_load(yaml_str) + print(d.ca) + y = round_trip_dump(d, indent=4) + print(y.replace('\n', '$\n')) + assert yaml_str == y + + def test_issue_54_ok(self): + yaml_str = dedent( + """\ + toplevel: + # some comment + sublevel: 300 + """ + ) + d = round_trip_load(yaml_str) + y = round_trip_dump(d, indent=4) + assert yaml_str == y + + def test_issue_93(self): + round_trip( + """\ + a: + b: + - c1: cat # a1 + # my comment on catfish + - c2: catfish # a2 + """ + ) + + def test_issue_93_00(self): + round_trip( + """\ + a: + - - c1: cat # a1 + # my comment on catfish + - c2: catfish # a2 + """ + ) + + def test_issue_93_01(self): + round_trip( + """\ + - - c1: cat # a1 + # my comment on catfish + - c2: catfish # a2 + """ + ) + + def test_issue_93_02(self): + # never failed as there is no indent + round_trip( + """\ + - c1: cat + # my comment on catfish + - c2: catfish + """ + ) + + def test_issue_96(self): + # inserted extra line on trailing spaces + round_trip( + """\ + a: + b: + c: c_val + d: + + e: + g: g_val + """ + ) + + +class TestUnicodeComments: + def test_issue_55(self): # reported by Haraguroicha Hsu + round_trip( + """\ + name: TEST + description: test using + author: Harguroicha + sql: + command: |- + select name from testtbl where no = :no + + ci-test: + - :no: 04043709 # å°èŠ± + - :no: 05161690 # 茶 + - :no: 05293147 # ã€‡ð¤‹¥å· + - :no: 05338777 # 〇〇啓 + - :no: 
05273867 # 〇 + - :no: 05205786 # 〇𤦌 + """ + ) + + +class TestEmptyValueBeforeComments: + def test_issue_25a(self): + round_trip( + """\ + - a: b + c: d + d: # foo + - e: f + """ + ) + + def test_issue_25a1(self): + round_trip( + """\ + - a: b + c: d + d: # foo + e: f + """ + ) + + def test_issue_25b(self): + round_trip( + """\ + var1: #empty + var2: something #notempty + """ + ) + + def test_issue_25c(self): + round_trip( + """\ + params: + a: 1 # comment a + b: # comment b + c: 3 # comment c + """ + ) + + def test_issue_25c1(self): + round_trip( + """\ + params: + a: 1 # comment a + b: # comment b + # extra + c: 3 # comment c + """ + ) + + def test_issue_25_00(self): + round_trip( + """\ + params: + a: 1 # comment a + b: # comment b + """ + ) + + def test_issue_25_01(self): + round_trip( + """\ + a: # comment 1 + # comment 2 + - b: # comment 3 + c: 1 # comment 4 + """ + ) + + def test_issue_25_02(self): + round_trip( + """\ + a: # comment 1 + # comment 2 + - b: 2 # comment 3 + """ + ) + + def test_issue_25_03(self): + s = """\ + a: # comment 1 + # comment 2 + - b: 2 # comment 3 + """ + round_trip(s, indent=4, block_seq_indent=2) + + def test_issue_25_04(self): + round_trip( + """\ + a: # comment 1 + # comment 2 + b: 1 # comment 3 + """ + ) + + def test_flow_seq_within_seq(self): + round_trip( + """\ + # comment 1 + - a + - b + # comment 2 + - c + - d + # comment 3 + - [e] + - f + # comment 4 + - [] + """ + ) + + def test_comment_after_block_scalar_indicator(self): + round_trip( + """\ + a: | # abc + test 1 + test 2 + # all done + """ + ) + + +test_block_scalar_commented_line_template = """\ +y: p +# Some comment + +a: | + x +{}b: y +""" + + +class TestBlockScalarWithComments: + # issue 99 reported by Colm O'Connor + def test_scalar_with_comments(self): + import ruyaml # NOQA + + for x in [ + "", + '\n', + '\n# Another comment\n', + '\n\n', + '\n\n# abc\n#xyz\n', + '\n\n# abc\n#xyz\n', + '# abc\n\n#xyz\n', + '\n\n # abc\n #xyz\n', + ]: + + commented_line = 
test_block_scalar_commented_line_template.format(x) + data = round_trip_load(commented_line) + + assert round_trip_dump(data) == commented_line diff --git a/_test/test_contextmanager.py b/_test/test_contextmanager.py new file mode 100644 index 0000000..4539614 --- /dev/null +++ b/_test/test_contextmanager.py @@ -0,0 +1,116 @@ +# coding: utf-8 + +""" +testing of anchors and the aliases referring to them +""" + +import sys + +import pytest + +single_doc = """\ +- a: 1 +- b: + - 2 + - 3 +""" + +single_data = [dict(a=1), dict(b=[2, 3])] + +multi_doc = """\ +--- +- abc +- xyz +--- +- a: 1 +- b: + - 2 + - 3 +""" + +multi_doc_data = [['abc', 'xyz'], single_data] + + +def get_yaml(): + from ruyaml import YAML + + return YAML() + + +class TestOldStyle: + def test_single_load(self): + d = get_yaml().load(single_doc) + print(d) + print(type(d[0])) + assert d == single_data + + def test_single_load_no_arg(self): + with pytest.raises(TypeError): + assert get_yaml().load() == single_data + + def test_multi_load(self): + data = list(get_yaml().load_all(multi_doc)) + assert data == multi_doc_data + + def test_single_dump(self, capsys): + get_yaml().dump(single_data, sys.stdout) + out, err = capsys.readouterr() + assert out == single_doc + + def test_multi_dump(self, capsys): + yaml = get_yaml() + yaml.explicit_start = True + yaml.dump_all(multi_doc_data, sys.stdout) + out, err = capsys.readouterr() + assert out == multi_doc + + +class TestContextManager: + def test_single_dump(self, capsys): + from ruyaml import YAML + + with YAML(output=sys.stdout) as yaml: + yaml.dump(single_data) + out, err = capsys.readouterr() + print(err) + assert out == single_doc + + def test_multi_dump(self, capsys): + from ruyaml import YAML + + with YAML(output=sys.stdout) as yaml: + yaml.explicit_start = True + yaml.dump(multi_doc_data[0]) + yaml.dump(multi_doc_data[1]) + + out, err = capsys.readouterr() + print(err) + assert out == multi_doc + + # input is not as simple with a context manager + # you 
need to indicate what you expect hence load and load_all + + # @pytest.mark.xfail(strict=True) + # def test_single_load(self): + # from ruyaml import YAML + # with YAML(input=single_doc) as yaml: + # assert yaml.load() == single_data + # + # @pytest.mark.xfail(strict=True) + # def test_multi_load(self): + # from ruyaml import YAML + # with YAML(input=multi_doc) as yaml: + # for idx, data in enumerate(yaml.load()): + # assert data == multi_doc_data[0] + + def test_roundtrip(self, capsys): + from ruyaml import YAML + + with YAML(output=sys.stdout) as yaml: + yaml.explicit_start = True + for data in yaml.load_all(multi_doc): + yaml.dump(data) + + out, err = capsys.readouterr() + print(err) + assert out == multi_doc diff --git a/_test/test_copy.py b/_test/test_copy.py new file mode 100644 index 0000000..7ebd4c1 --- /dev/null +++ b/_test/test_copy.py @@ -0,0 +1,135 @@ +# coding: utf-8 + +""" +Testing copy and deepcopy, instigated by Issue 84 (Peter Amstutz) +""" + +import copy + +import pytest # NOQA + +from .roundtrip import dedent, round_trip_dump, round_trip_load + + +class TestDeepCopy: + def test_preserve_flow_style_simple(self): + x = dedent( + """\ + {foo: bar, baz: quux} + """ + ) + data = round_trip_load(x) + data_copy = copy.deepcopy(data) + y = round_trip_dump(data_copy) + print('x [{}]'.format(x)) + print('y [{}]'.format(y)) + assert y == x + assert data.fa.flow_style() == data_copy.fa.flow_style() + + def test_deepcopy_flow_style_nested_dict(self): + x = dedent( + """\ + a: {foo: bar, baz: quux} + """ + ) + data = round_trip_load(x) + assert data['a'].fa.flow_style() is True + data_copy = copy.deepcopy(data) + assert data_copy['a'].fa.flow_style() is True + data_copy['a'].fa.set_block_style() + assert data['a'].fa.flow_style() != data_copy['a'].fa.flow_style() + assert data['a'].fa._flow_style is True + assert data_copy['a'].fa._flow_style is False + y = round_trip_dump(data_copy) + + print('x [{}]'.format(x)) + print('y [{}]'.format(y)) + assert y == 
dedent( + """\ + a: + foo: bar + baz: quux + """ + ) + + def test_deepcopy_flow_style_nested_list(self): + x = dedent( + """\ + a: [1, 2, 3] + """ + ) + data = round_trip_load(x) + assert data['a'].fa.flow_style() is True + data_copy = copy.deepcopy(data) + assert data_copy['a'].fa.flow_style() is True + data_copy['a'].fa.set_block_style() + assert data['a'].fa.flow_style() != data_copy['a'].fa.flow_style() + assert data['a'].fa._flow_style is True + assert data_copy['a'].fa._flow_style is False + y = round_trip_dump(data_copy) + + print('x [{}]'.format(x)) + print('y [{}]'.format(y)) + assert y == dedent( + """\ + a: + - 1 + - 2 + - 3 + """ + ) + + +class TestCopy: + def test_copy_flow_style_nested_dict(self): + x = dedent( + """\ + a: {foo: bar, baz: quux} + """ + ) + data = round_trip_load(x) + assert data['a'].fa.flow_style() is True + data_copy = copy.copy(data) + assert data_copy['a'].fa.flow_style() is True + data_copy['a'].fa.set_block_style() + assert data['a'].fa.flow_style() == data_copy['a'].fa.flow_style() + assert data['a'].fa._flow_style is False + assert data_copy['a'].fa._flow_style is False + y = round_trip_dump(data_copy) + z = round_trip_dump(data) + assert y == z + + assert y == dedent( + """\ + a: + foo: bar + baz: quux + """ + ) + + def test_copy_flow_style_nested_list(self): + x = dedent( + """\ + a: [1, 2, 3] + """ + ) + data = round_trip_load(x) + assert data['a'].fa.flow_style() is True + data_copy = copy.copy(data) + assert data_copy['a'].fa.flow_style() is True + data_copy['a'].fa.set_block_style() + assert data['a'].fa.flow_style() == data_copy['a'].fa.flow_style() + assert data['a'].fa._flow_style is False + assert data_copy['a'].fa._flow_style is False + y = round_trip_dump(data_copy) + + print('x [{}]'.format(x)) + print('y [{}]'.format(y)) + assert y == dedent( + """\ + a: + - 1 + - 2 + - 3 + """ + ) diff --git a/_test/test_cyaml.py b/_test/test_cyaml.py new file mode 100644 index 0000000..b16c7ab --- /dev/null +++ 
b/_test/test_cyaml.py @@ -0,0 +1,97 @@ +# coding: utf-8 + +import platform +import sys +from textwrap import dedent + +import pytest + +NO_CLIB_VER = (3, 10) + + +@pytest.mark.skipif( + platform.python_implementation() in ['Jython', 'PyPy'], + reason='Jython throws RepresenterError', +) +@pytest.mark.xfail(reason="cyaml not ported yet") +def test_load_cyaml(): + print("???????????????????????", platform.python_implementation()) + import ruyaml + + if sys.version_info >= NO_CLIB_VER: + return + yaml = ruyaml.YAML(typ='safe', pure=False) + assert ruyaml.__with_libyaml__ + + yaml.load('abc: 1') + + +@pytest.mark.skipif( + sys.version_info >= NO_CLIB_VER + or platform.python_implementation() in ['Jython', 'PyPy'], + reason='no _PyGC_FINALIZED', +) +def test_dump_cyaml(): + import ruyaml + + if sys.version_info >= NO_CLIB_VER: + return + data = {'a': 1, 'b': 2} + yaml = ruyaml.YAML(typ='safe', pure=False) + yaml.default_flow_style = False + yaml.allow_unicode = True + buf = ruyaml.compat.StringIO() + yaml.dump(data, buf) + assert buf.getvalue() == 'a: 1\nb: 2\n' + + +@pytest.mark.skipif( + platform.python_implementation() in ['Jython', 'PyPy'], reason='not avialable' +) +@pytest.mark.xfail(reason="cyaml not ported yet") +def test_load_cyaml_1_2(): + # issue 155 + import ruyaml + + if sys.version_info >= NO_CLIB_VER: + return + assert ruyaml.__with_libyaml__ + inp = dedent( + """\ + %YAML 1.2 + --- + num_epochs: 70000 + """ + ) + yaml = ruyaml.YAML(typ='safe') + yaml.load(inp) + + +@pytest.mark.skipif( + platform.python_implementation() in ['Jython', 'PyPy'], reason='not available' +) +@pytest.mark.xfail(reason="cyaml not ported yet") +def test_dump_cyaml_1_2(): + # issue 155 + from io import StringIO + + import ruyaml + + if sys.version_info >= NO_CLIB_VER: + return + assert ruyaml.__with_libyaml__ + yaml = ruyaml.YAML(typ='safe') + yaml.version = (1, 2) + yaml.default_flow_style = False + data = {'a': 1, 'b': 2} + exp = dedent( + """\ + %YAML 1.2 + --- + a: 1 + b: 2 + 
""" + ) + buf = StringIO() + yaml.dump(data, buf) + assert buf.getvalue() == exp diff --git a/_test/test_datetime.py b/_test/test_datetime.py new file mode 100644 index 0000000..9997ba0 --- /dev/null +++ b/_test/test_datetime.py @@ -0,0 +1,158 @@ +# coding: utf-8 + +""" +http://yaml.org/type/timestamp.html specifies the regexp to use +for datetime.date and datetime.datetime construction. Date is simple +but datetime can have 'T' or 't' as well as 'Z' or a timezone offset (in +hours and minutes). This information was originally used to create +a UTC datetime and then discarded + +examples from the above: + +canonical: 2001-12-15T02:59:43.1Z +valid iso8601: 2001-12-14t21:59:43.10-05:00 +space separated: 2001-12-14 21:59:43.10 -5 +no time zone (Z): 2001-12-15 2:59:43.10 +date (00:00:00Z): 2002-12-14 + +Please note that a fraction can only be included if not equal to 0 + +""" + +import copy + +import pytest # NOQA + +from .roundtrip import dedent, round_trip, round_trip_dump, round_trip_load # NOQA + + +class TestDateTime: + def test_date_only(self): + inp = """ + - 2011-10-02 + """ + exp = """ + - 2011-10-02 + """ + round_trip(inp, exp) + + def test_zero_fraction(self): + inp = """ + - 2011-10-02 16:45:00.0 + """ + exp = """ + - 2011-10-02 16:45:00 + """ + round_trip(inp, exp) + + def test_long_fraction(self): + inp = """ + - 2011-10-02 16:45:00.1234 # expand with zeros + - 2011-10-02 16:45:00.123456 + - 2011-10-02 16:45:00.12345612 # round to microseconds + - 2011-10-02 16:45:00.1234565 # round up + - 2011-10-02 16:45:00.12345678 # round up + """ + exp = """ + - 2011-10-02 16:45:00.123400 # expand with zeros + - 2011-10-02 16:45:00.123456 + - 2011-10-02 16:45:00.123456 # round to microseconds + - 2011-10-02 16:45:00.123457 # round up + - 2011-10-02 16:45:00.123457 # round up + """ + round_trip(inp, exp) + + def test_canonical(self): + inp = """ + - 2011-10-02T16:45:00.1Z + """ + exp = """ + - 2011-10-02T16:45:00.100000Z + """ + round_trip(inp, exp) + + def 
test_spaced_timezone(self): + inp = """ + - 2011-10-02T11:45:00 -5 + """ + exp = """ + - 2011-10-02T11:45:00-5 + """ + round_trip(inp, exp) + + def test_normal_timezone(self): + round_trip( + """ + - 2011-10-02T11:45:00-5 + - 2011-10-02 11:45:00-5 + - 2011-10-02T11:45:00-05:00 + - 2011-10-02 11:45:00-05:00 + """ + ) + + def test_no_timezone(self): + inp = """ + - 2011-10-02 6:45:00 + """ + exp = """ + - 2011-10-02 06:45:00 + """ + round_trip(inp, exp) + + def test_explicit_T(self): + inp = """ + - 2011-10-02T16:45:00 + """ + exp = """ + - 2011-10-02T16:45:00 + """ + round_trip(inp, exp) + + def test_explicit_t(self): # to upper + inp = """ + - 2011-10-02t16:45:00 + """ + exp = """ + - 2011-10-02T16:45:00 + """ + round_trip(inp, exp) + + def test_no_T_multi_space(self): + inp = """ + - 2011-10-02 16:45:00 + """ + exp = """ + - 2011-10-02 16:45:00 + """ + round_trip(inp, exp) + + def test_iso(self): + round_trip( + """ + - 2011-10-02T15:45:00+01:00 + """ + ) + + def test_zero_tz(self): + round_trip( + """ + - 2011-10-02T15:45:00+0 + """ + ) + + def test_issue_45(self): + round_trip( + """ + dt: 2016-08-19T22:45:47Z + """ + ) + + def test_deepcopy_datestring(self): + # reported by Quuxplusone, http://stackoverflow.com/a/41577841/1307905 + x = dedent( + """\ + foo: 2016-10-12T12:34:56 + """ + ) + data = copy.deepcopy(round_trip_load(x)) + assert round_trip_dump(data) == x diff --git a/_test/test_deprecation.py b/_test/test_deprecation.py new file mode 100644 index 0000000..c6ed62e --- /dev/null +++ b/_test/test_deprecation.py @@ -0,0 +1,14 @@ +# coding: utf-8 + +import sys + +import pytest # NOQA + + +@pytest.mark.skipif( + sys.version_info < (3, 7) or sys.version_info >= (3, 9), + reason='collections not available?', +) +def test_collections_deprecation(): + with pytest.warns(DeprecationWarning): + from collections import Hashable # NOQA diff --git a/_test/test_documents.py b/_test/test_documents.py new file mode 100644 index 0000000..b750d5f --- /dev/null +++ 
b/_test/test_documents.py @@ -0,0 +1,75 @@ +# coding: utf-8 + +import pytest # NOQA + +from .roundtrip import round_trip, round_trip_dump_all, round_trip_load_all + + +class TestDocument: + def test_single_doc_begin_end(self): + inp = """\ + --- + - a + - b + ... + """ + round_trip(inp, explicit_start=True, explicit_end=True) + + def test_multi_doc_begin_end(self): + inp = """\ + --- + - a + ... + --- + - b + ... + """ + docs = list(round_trip_load_all(inp)) + assert docs == [['a'], ['b']] + out = round_trip_dump_all(docs, explicit_start=True, explicit_end=True) + assert out == '---\n- a\n...\n---\n- b\n...\n' + + def test_multi_doc_no_start(self): + inp = """\ + - a + ... + --- + - b + ... + """ + docs = list(round_trip_load_all(inp)) + assert docs == [['a'], ['b']] + + def test_multi_doc_no_end(self): + inp = """\ + - a + --- + - b + """ + docs = list(round_trip_load_all(inp)) + assert docs == [['a'], ['b']] + + def test_multi_doc_ends_only(self): + # this is ok in 1.2 + inp = """\ + - a + ... + - b + ... + """ + docs = list(round_trip_load_all(inp, version=(1, 2))) + assert docs == [['a'], ['b']] + + def test_multi_doc_ends_only_1_1(self): + import ruyaml + + # this is not ok in 1.1 + with pytest.raises(ruyaml.parser.ParserError): + inp = """\ + - a + ... + - b + ... + """ + docs = list(round_trip_load_all(inp, version=(1, 1))) + assert docs == [['a'], ['b']] # not True, but not reached diff --git a/_test/test_fail.py b/_test/test_fail.py new file mode 100644 index 0000000..4970c5e --- /dev/null +++ b/_test/test_fail.py @@ -0,0 +1,255 @@ +# coding: utf-8 + +# there is some work to do +# provide a failing test xyz and a non-failing xyz_no_fail ( to see +# what the current failing output is. 
+# on fix of ruyaml, move the marked test to the appropriate test (without mark) +# and remove remove the xyz_no_fail + +import pytest + +from .roundtrip import dedent, round_trip, round_trip_dump, round_trip_load + + +class TestCommentFailures: + @pytest.mark.xfail(strict=True) + def test_set_comment_before_tag(self): + # no comments before tags + round_trip( + """ + # the beginning + !!set + # or this one? + ? a + # next one is B (lowercase) + ? b # You see? Promised you. + ? c + # this is the end + """ + ) + + def test_set_comment_before_tag_no_fail(self): + # no comments before tags + inp = """ + # the beginning + !!set + # or this one? + ? a + # next one is B (lowercase) + ? b # You see? Promised you. + ? c + # this is the end + """ + assert round_trip_dump(round_trip_load(inp)) == dedent( + """ + !!set + # or this one? + ? a + # next one is B (lowercase) + ? b # You see? Promised you. + ? c + # this is the end + """ + ) + + @pytest.mark.xfail(strict=True) + def test_comment_dash_line(self): + round_trip( + """ + - # abc + a: 1 + b: 2 + """ + ) + + def test_comment_dash_line_fail(self): + x = """ + - # abc + a: 1 + b: 2 + """ + data = round_trip_load(x) + # this is not nice + assert round_trip_dump(data) == dedent( + """ + # abc + - a: 1 + b: 2 + """ + ) + + +class TestIndentFailures: + @pytest.mark.xfail(strict=True) + def test_indent_not_retained(self): + round_trip( + """ + verbosity: 1 # 0 is minimal output, -1 none + base_url: http://gopher.net + special_indices: [1, 5, 8] + also_special: + - a + - 19 + - 32 + asia and europe: &asia_europe + Turkey: Ankara + Russia: Moscow + countries: + Asia: + <<: *asia_europe + Japan: Tokyo # æ±äº¬ + Europe: + <<: *asia_europe + Spain: Madrid + Italy: Rome + Antarctica: + - too cold + """ + ) + + def test_indent_not_retained_no_fail(self): + inp = """ + verbosity: 1 # 0 is minimal output, -1 none + base_url: http://gopher.net + special_indices: [1, 5, 8] + also_special: + - a + - 19 + - 32 + asia and europe: 
&asia_europe + Turkey: Ankara + Russia: Moscow + countries: + Asia: + <<: *asia_europe + Japan: Tokyo # æ±äº¬ + Europe: + <<: *asia_europe + Spain: Madrid + Italy: Rome + Antarctica: + - too cold + """ + assert round_trip_dump(round_trip_load(inp), indent=4) == dedent( + """ + verbosity: 1 # 0 is minimal output, -1 none + base_url: http://gopher.net + special_indices: [1, 5, 8] + also_special: + - a + - 19 + - 32 + asia and europe: &asia_europe + Turkey: Ankara + Russia: Moscow + countries: + Asia: + <<: *asia_europe + Japan: Tokyo # æ±äº¬ + Europe: + <<: *asia_europe + Spain: Madrid + Italy: Rome + Antarctica: + - too cold + """ + ) + + def Xtest_indent_top_level_no_fail(self): + inp = """ + - a: + - b + """ + round_trip(inp, indent=4) + + +class TestTagFailures: + @pytest.mark.xfail(strict=True) + def test_standard_short_tag(self): + round_trip( + """\ + !!map + name: Anthon + location: Germany + language: python + """ + ) + + def test_standard_short_tag_no_fail(self): + inp = """ + !!map + name: Anthon + location: Germany + language: python + """ + exp = """ + name: Anthon + location: Germany + language: python + """ + assert round_trip_dump(round_trip_load(inp)) == dedent(exp) + + +class TestFlowValues: + def test_flow_value_with_colon(self): + inp = """\ + {a: bcd:efg} + """ + round_trip(inp) + + def test_flow_value_with_colon_quoted(self): + inp = """\ + {a: 'bcd:efg'} + """ + round_trip(inp, preserve_quotes=True) + + +class TestMappingKey: + def test_simple_mapping_key(self): + inp = """\ + {a: 1, b: 2}: hello world + """ + round_trip(inp, preserve_quotes=True, dump_data=False) + + def test_set_simple_mapping_key(self): + from ruyaml.comments import CommentedKeyMap + + d = {CommentedKeyMap([('a', 1), ('b', 2)]): 'hello world'} + exp = dedent( + """\ + {a: 1, b: 2}: hello world + """ + ) + assert round_trip_dump(d) == exp + + def test_change_key_simple_mapping_key(self): + from ruyaml.comments import CommentedKeyMap + + inp = """\ + {a: 1, b: 2}: hello world 
+ """ + d = round_trip_load(inp, preserve_quotes=True) + d[CommentedKeyMap([('b', 1), ('a', 2)])] = d.pop( + CommentedKeyMap([('a', 1), ('b', 2)]) + ) + exp = dedent( + """\ + {b: 1, a: 2}: hello world + """ + ) + assert round_trip_dump(d) == exp + + def test_change_value_simple_mapping_key(self): + from ruyaml.comments import CommentedKeyMap + + inp = """\ + {a: 1, b: 2}: hello world + """ + d = round_trip_load(inp, preserve_quotes=True) + d = {CommentedKeyMap([('a', 1), ('b', 2)]): 'goodbye'} + exp = dedent( + """\ + {a: 1, b: 2}: goodbye + """ + ) + assert round_trip_dump(d) == exp diff --git a/_test/test_float.py b/_test/test_float.py new file mode 100644 index 0000000..4be2e48 --- /dev/null +++ b/_test/test_float.py @@ -0,0 +1,90 @@ +# coding: utf-8 + +import pytest # NOQA + +from .roundtrip import dedent, round_trip, round_trip_dump, round_trip_load # NOQA + +# http://yaml.org/type/int.html is where underscores in integers are defined + + +class TestFloat: + def test_round_trip_non_exp(self): + data = round_trip( + """\ + - 1.0 + - 1.00 + - 23.100 + - -1.0 + - -1.00 + - -23.100 + - 42. + - -42. + - +42. + - .5 + - +.5 + - -.5 + """ + ) + print(data) + assert 0.999 < data[0] < 1.001 + assert 0.999 < data[1] < 1.001 + assert 23.099 < data[2] < 23.101 + assert 0.999 < -data[3] < 1.001 + assert 0.999 < -data[4] < 1.001 + assert 23.099 < -data[5] < 23.101 + assert 41.999 < data[6] < 42.001 + assert 41.999 < -data[7] < 42.001 + assert 41.999 < data[8] < 42.001 + assert 0.49 < data[9] < 0.51 + assert 0.49 < data[10] < 0.51 + assert -0.51 < data[11] < -0.49 + + def test_round_trip_zeros_0(self): + data = round_trip( + """\ + - 0. + - +0. + - -0. 
+ - 0.0 + - +0.0 + - -0.0 + - 0.00 + - +0.00 + - -0.00 + """ + ) + print(data) + for d in data: + assert -0.00001 < d < 0.00001 + + def Xtest_round_trip_non_exp_trailing_dot(self): + data = round_trip( + """\ + """ + ) + print(data) + + def test_yaml_1_1_no_dot(self): + from ruyaml.error import MantissaNoDotYAML1_1Warning + + with pytest.warns(MantissaNoDotYAML1_1Warning): + round_trip_load( + """\ + %YAML 1.1 + --- + - 1e6 + """ + ) + + +class TestCalculations: + def test_mul_00(self): + # issue 149 reported by jan.brezina@tul.cz + d = round_trip_load( + """\ + - 0.1 + """ + ) + d[0] *= -1 + x = round_trip_dump(d) + assert x == '- -0.1\n' diff --git a/_test/test_flowsequencekey.py b/_test/test_flowsequencekey.py new file mode 100644 index 0000000..8362bec --- /dev/null +++ b/_test/test_flowsequencekey.py @@ -0,0 +1,25 @@ +# coding: utf-8 + +""" +test flow style sequences as keys roundtrip + +""" + +# import pytest + +from .roundtrip import round_trip # , dedent, round_trip_load, round_trip_dump + + +class TestFlowStyleSequenceKey: + def test_so_39595807(self): + inp = """\ + %YAML 1.2 + --- + [2, 3, 4]: + a: + - 1 + - 2 + b: Hello World! + c: 'Voilà!' 
+ """ + round_trip(inp, preserve_quotes=True, explicit_start=True, version=(1, 2)) diff --git a/_test/test_indentation.py b/_test/test_indentation.py new file mode 100644 index 0000000..a68f69b --- /dev/null +++ b/_test/test_indentation.py @@ -0,0 +1,352 @@ +# coding: utf-8 + +import pytest # NOQA + +from .roundtrip import YAML, dedent, round_trip, round_trip_dump, round_trip_load + + +def rt(s): + + res = round_trip_dump(round_trip_load(s)) + return res.strip() + '\n' + + +class TestIndent: + def test_roundtrip_inline_list(self): + s = 'a: [a, b, c]\n' + output = rt(s) + assert s == output + + def test_roundtrip_mapping_of_inline_lists(self): + s = dedent( + """\ + a: [a, b, c] + j: [k, l, m] + """ + ) + output = rt(s) + assert s == output + + def test_roundtrip_mapping_of_inline_lists_comments(self): + s = dedent( + """\ + # comment A + a: [a, b, c] + # comment B + j: [k, l, m] + """ + ) + output = rt(s) + assert s == output + + def test_roundtrip_mapping_of_inline_sequence_eol_comments(self): + s = dedent( + """\ + # comment A + a: [a, b, c] # comment B + j: [k, l, m] # comment C + """ + ) + output = rt(s) + assert s == output + + # first test by explicitly setting flow style + def test_added_inline_list(self): + s1 = dedent( + """ + a: + - b + - c + - d + """ + ) + s = 'a: [b, c, d]\n' + data = round_trip_load(s1) + val = data['a'] + val.fa.set_flow_style() + # print(type(val), '_yaml_format' in dir(val)) + output = round_trip_dump(data) + assert s == output + + # ############ flow mappings + + def test_roundtrip_flow_mapping(self): + s = dedent( + """\ + - {a: 1, b: hallo} + - {j: fka, k: 42} + """ + ) + data = round_trip_load(s) + output = round_trip_dump(data) + assert s == output + + def test_roundtrip_sequence_of_inline_mappings_eol_comments(self): + s = dedent( + """\ + # comment A + - {a: 1, b: hallo} # comment B + - {j: fka, k: 42} # comment C + """ + ) + output = rt(s) + assert s == output + + def test_indent_top_level(self): + inp = """ + - a: + - b + 
""" + round_trip(inp, indent=4) + + def test_set_indent_5_block_list_indent_1(self): + inp = """ + a: + - b: c + - 1 + - d: + - 2 + """ + round_trip(inp, indent=5, block_seq_indent=1) + + def test_set_indent_4_block_list_indent_2(self): + inp = """ + a: + - b: c + - 1 + - d: + - 2 + """ + round_trip(inp, indent=4, block_seq_indent=2) + + def test_set_indent_3_block_list_indent_0(self): + inp = """ + a: + - b: c + - 1 + - d: + - 2 + """ + round_trip(inp, indent=3, block_seq_indent=0) + + def Xtest_set_indent_3_block_list_indent_2(self): + inp = """ + a: + - + b: c + - + 1 + - + d: + - + 2 + """ + round_trip(inp, indent=3, block_seq_indent=2) + + def test_set_indent_3_block_list_indent_2(self): + inp = """ + a: + - b: c + - 1 + - d: + - 2 + """ + round_trip(inp, indent=3, block_seq_indent=2) + + def Xtest_set_indent_2_block_list_indent_2(self): + inp = """ + a: + - + b: c + - + 1 + - + d: + - + 2 + """ + round_trip(inp, indent=2, block_seq_indent=2) + + # this is how it should be: block_seq_indent stretches the indent + def test_set_indent_2_block_list_indent_2(self): + inp = """ + a: + - b: c + - 1 + - d: + - 2 + """ + round_trip(inp, indent=2, block_seq_indent=2) + + # have to set indent! 
+ def test_roundtrip_four_space_indents(self): + # fmt: off + s = ( + 'a:\n' + '- foo\n' + '- bar\n' + ) + # fmt: on + round_trip(s, indent=4) + + def test_roundtrip_four_space_indents_no_fail(self): + inp = """ + a: + - foo + - bar + """ + exp = """ + a: + - foo + - bar + """ + assert round_trip_dump(round_trip_load(inp)) == dedent(exp) + + +class TestYpkgIndent: + def test_00(self): + inp = """ + name : nano + version : 2.3.2 + release : 1 + homepage : http://www.nano-editor.org + source : + - http://www.nano-editor.org/dist/v2.3/nano-2.3.2.tar.gz : ff30924807ea289f5b60106be8 + license : GPL-2.0 + summary : GNU nano is an easy-to-use text editor + builddeps : + - ncurses-devel + description: | + GNU nano is an easy-to-use text editor originally designed + as a replacement for Pico, the ncurses-based editor from the non-free mailer + package Pine (itself now available under the Apache License as Alpine). + """ + round_trip( + inp, + indent=4, + block_seq_indent=2, + top_level_colon_align=True, + prefix_colon=' ', + ) + + +def guess(s): + from ruyaml.util import load_yaml_guess_indent + + x, y, z = load_yaml_guess_indent(dedent(s)) + return y, z + + +class TestGuessIndent: + def test_guess_20(self): + inp = """\ + a: + - 1 + """ + assert guess(inp) == (2, 0) + + def test_guess_42(self): + inp = """\ + a: + - 1 + """ + assert guess(inp) == (4, 2) + + def test_guess_42a(self): + # block seq indent prevails over nested key indent level + inp = """\ + b: + a: + - 1 + """ + assert guess(inp) == (4, 2) + + def test_guess_3None(self): + inp = """\ + b: + a: 1 + """ + assert guess(inp) == (3, None) + + +class TestSeparateMapSeqIndents: + # using uncommon 6 indent with 3 push in as 2 push in automatically + # gets you 4 indent even if not set + def test_00(self): + # old style + yaml = YAML() + yaml.indent = 6 + yaml.block_seq_indent = 3 + inp = """ + a: + - 1 + - [1, 2] + """ + yaml.round_trip(inp) + + def test_01(self): + yaml = YAML() + yaml.indent(sequence=6) + 
yaml.indent(offset=3) + inp = """ + a: + - 1 + - {b: 3} + """ + yaml.round_trip(inp) + + def test_02(self): + yaml = YAML() + yaml.indent(mapping=5, sequence=6, offset=3) + inp = """ + a: + b: + - 1 + - [1, 2] + """ + yaml.round_trip(inp) + + def test_03(self): + inp = """ + a: + b: + c: + - 1 + - [1, 2] + """ + round_trip(inp, indent=4) + + def test_04(self): + yaml = YAML() + yaml.indent(mapping=5, sequence=6) + inp = """ + a: + b: + - 1 + - [1, 2] + - {d: 3.14} + """ + yaml.round_trip(inp) + + def test_issue_51(self): + yaml = YAML() + # yaml.map_indent = 2 # the default + yaml.indent(sequence=4, offset=2) + yaml.preserve_quotes = True + yaml.round_trip( + """ + role::startup::author::rsyslog_inputs: + imfile: + - ruleset: 'AEM-slinglog' + File: '/opt/aem/author/crx-quickstart/logs/error.log' + startmsg.regex: '^[-+T.:[:digit:]]*' + tag: 'error' + - ruleset: 'AEM-slinglog' + File: '/opt/aem/author/crx-quickstart/logs/stdout.log' + startmsg.regex: '^[-+T.:[:digit:]]*' + tag: 'stdout' + """ + ) + + +# ############ indentation diff --git a/_test/test_int.py b/_test/test_int.py new file mode 100644 index 0000000..d409746 --- /dev/null +++ b/_test/test_int.py @@ -0,0 +1,34 @@ +# coding: utf-8 + +import pytest # NOQA + +from .roundtrip import dedent, round_trip_dump, round_trip_load + +# http://yaml.org/type/int.html is where underscores in integers are defined + + +class TestBinHexOct: + def test_calculate(self): + # make sure type, leading zero(s) and underscore are preserved + s = dedent( + """\ + - 42 + - 0b101010 + - 0x_2a + - 0x2A + - 0o00_52 + """ + ) + d = round_trip_load(s) + for idx, elem in enumerate(d): + elem -= 21 + d[idx] = elem + for idx, elem in enumerate(d): + elem *= 2 + d[idx] = elem + for idx, elem in enumerate(d): + t = elem + elem **= 2 + elem //= t + d[idx] = elem + assert round_trip_dump(d) == s diff --git a/_test/test_issues.py b/_test/test_issues.py new file mode 100644 index 0000000..65efa95 --- /dev/null +++ b/_test/test_issues.py @@ -0,0 
+1,957 @@ +# coding: utf-8 + +import pytest # NOQA + +from .roundtrip import ( # NOQA + YAML, + dedent, + na_round_trip, + round_trip, + round_trip_dump, + round_trip_load, + save_and_run, +) + + +class TestIssues: + def test_issue_61(self): + s = dedent( + """ + def1: &ANCHOR1 + key1: value1 + def: &ANCHOR + <<: *ANCHOR1 + key: value + comb: + <<: *ANCHOR + """ + ) + data = round_trip_load(s) + assert str(data['comb']) == str(data['def']) + assert ( + str(data['comb']) == "ordereddict([('key', 'value'), ('key1', 'value1')])" + ) + + def test_issue_82(self, tmpdir): + program_src = r''' + import ruyaml as yaml + import re + + class SINumber(yaml.YAMLObject): + PREFIXES = {'k': 1e3, 'M': 1e6, 'G': 1e9} + yaml_loader = yaml.Loader + yaml_dumper = yaml.Dumper + yaml_tag = '!si' + yaml_implicit_pattern = re.compile( + r'^(?P[0-9]+(?:\.[0-9]+)?)(?P[kMG])$') + + @classmethod + def from_yaml(cls, loader, node): + return cls(node.value) + + @classmethod + def to_yaml(cls, dumper, data): + return dumper.represent_scalar(cls.yaml_tag, str(data)) + + def __init__(self, *args): + m = self.yaml_implicit_pattern.match(args[0]) + self.value = float(m.groupdict()['value']) + self.prefix = m.groupdict()['prefix'] + + def __str__(self): + return str(self.value)+self.prefix + + def __int__(self): + return int(self.value*self.PREFIXES[self.prefix]) + + # This fails: + yaml.add_implicit_resolver(SINumber.yaml_tag, SINumber.yaml_implicit_pattern) + + ret = yaml.load(""" + [1,2,3, !si 10k, 100G] + """, Loader=yaml.Loader) + for idx, l in enumerate([1, 2, 3, 10000, 100000000000]): + assert int(ret[idx]) == l + ''' + assert save_and_run(dedent(program_src), tmpdir) == 0 + + def test_issue_82rt(self, tmpdir): + yaml_str = '[1, 2, 3, !si 10k, 100G]\n' + x = round_trip(yaml_str, preserve_quotes=True) # NOQA + + def test_issue_102(self): + yaml_str = dedent( + """ + var1: #empty + var2: something #notempty + var3: {} #empty object + var4: {a: 1} #filled object + var5: [] #empty array + """ + ) 
+ x = round_trip(yaml_str, preserve_quotes=True) # NOQA + + def test_issue_150(self): + from ruyaml import YAML + + inp = """\ + base: &base_key + first: 123 + second: 234 + + child: + <<: *base_key + third: 345 + """ + yaml = YAML() + data = yaml.load(inp) + child = data['child'] + assert 'second' in dict(**child) + + def test_issue_160(self): + from io import StringIO + + s = dedent( + """\ + root: + # a comment + - {some_key: "value"} + + foo: 32 + bar: 32 + """ + ) + a = round_trip_load(s) + del a['root'][0]['some_key'] + buf = StringIO() + round_trip_dump(a, buf, block_seq_indent=4) + exp = dedent( + """\ + root: + # a comment + - {} + + foo: 32 + bar: 32 + """ + ) + assert buf.getvalue() == exp + + def test_issue_161(self): + yaml_str = dedent( + """\ + mapping-A: + key-A:{} + mapping-B: + """ + ) + for comment in ['', ' # no-newline', ' # some comment\n', '\n']: + s = yaml_str.format(comment) + res = round_trip(s) # NOQA + + def test_issue_161a(self): + yaml_str = dedent( + """\ + mapping-A: + key-A:{} + mapping-B: + """ + ) + for comment in ['\n# between']: + s = yaml_str.format(comment) + res = round_trip(s) # NOQA + + def test_issue_163(self): + s = dedent( + """\ + some-list: + # List comment + - {} + """ + ) + x = round_trip(s, preserve_quotes=True) # NOQA + + json_str = ( + r'{"sshKeys":[{"name":"AETROS\/google-k80-1","uses":0,"getLastUse":0,' + '"fingerprint":"MD5:19:dd:41:93:a1:a3:f5:91:4a:8e:9b:d0:ae:ce:66:4c",' + '"created":1509497961}]}' + ) + + json_str2 = '{"abc":[{"a":"1", "uses":0}]}' + + def test_issue_172(self): + x = round_trip_load(TestIssues.json_str2) # NOQA + x = round_trip_load(TestIssues.json_str) # NOQA + + def test_issue_176(self): + # basic request by Stuart Berg + from ruyaml import YAML + + yaml = YAML() + seq = yaml.load('[1,2,3]') + seq[:] = [1, 2, 3, 4] + + def test_issue_176_preserve_comments_on_extended_slice_assignment(self): + yaml_str = dedent( + """\ + - a + - b # comment + - c # commment c + # comment c+ + - d + + - e # 
comment + """ + ) + seq = round_trip_load(yaml_str) + seq[1::2] = ['B', 'D'] + res = round_trip_dump(seq) + assert res == yaml_str.replace(' b ', ' B ').replace(' d\n', ' D\n') + + def test_issue_176_test_slicing(self): + mss = round_trip_load('[0, 1, 2, 3, 4]') + assert len(mss) == 5 + assert mss[2:2] == [] + assert mss[2:4] == [2, 3] + assert mss[1::2] == [1, 3] + + # slice assignment + m = mss[:] + m[2:2] = [42] + assert m == [0, 1, 42, 2, 3, 4] + + m = mss[:] + m[:3] = [42, 43, 44] + assert m == [42, 43, 44, 3, 4] + m = mss[:] + m[2:] = [42, 43, 44] + assert m == [0, 1, 42, 43, 44] + m = mss[:] + m[:] = [42, 43, 44] + assert m == [42, 43, 44] + + # extend slice assignment + m = mss[:] + m[2:4] = [42, 43, 44] + assert m == [0, 1, 42, 43, 44, 4] + m = mss[:] + m[1::2] = [42, 43] + assert m == [0, 42, 2, 43, 4] + m = mss[:] + with pytest.raises(TypeError, match='too many'): + m[1::2] = [42, 43, 44] + with pytest.raises(TypeError, match='not enough'): + m[1::2] = [42] + m = mss[:] + m += [5] + m[1::2] = [42, 43, 44] + assert m == [0, 42, 2, 43, 4, 44] + + # deleting + m = mss[:] + del m[1:3] + assert m == [0, 3, 4] + m = mss[:] + del m[::2] + assert m == [1, 3] + m = mss[:] + del m[:] + assert m == [] + + def test_issue_184(self): + yaml_str = dedent( + """\ + test::test: + # test + foo: + bar: baz + """ + ) + d = round_trip_load(yaml_str) + d['bar'] = 'foo' + d.yaml_add_eol_comment('test1', 'bar') + assert round_trip_dump(d) == yaml_str + 'bar: foo # test1\n' + + def test_issue_219(self): + yaml_str = dedent( + """\ + [StackName: AWS::StackName] + """ + ) + d = round_trip_load(yaml_str) # NOQA + + def test_issue_219a(self): + yaml_str = dedent( + """\ + [StackName: + AWS::StackName] + """ + ) + d = round_trip_load(yaml_str) # NOQA + + def test_issue_220(self, tmpdir): + program_src = r''' + from ruyaml import YAML + + yaml_str = """\ + --- + foo: ["bar"] + """ + + yaml = YAML(typ='safe', pure=True) + d = yaml.load(yaml_str) + print(d) + ''' + assert 
save_and_run(dedent(program_src), tmpdir, optimized=True) == 0 + + def test_issue_221_add(self): + from ruyaml.comments import CommentedSeq + + a = CommentedSeq([1, 2, 3]) + a + [4, 5] + + def test_issue_221_sort(self): + from io import StringIO + + from ruyaml import YAML + + yaml = YAML() + inp = dedent( + """\ + - d + - a # 1 + - c # 3 + - e # 5 + - b # 2 + """ + ) + a = yaml.load(dedent(inp)) + a.sort() + buf = StringIO() + yaml.dump(a, buf) + exp = dedent( + """\ + - a # 1 + - b # 2 + - c # 3 + - d + - e # 5 + """ + ) + assert buf.getvalue() == exp + + def test_issue_221_sort_reverse(self): + from io import StringIO + + from ruyaml import YAML + + yaml = YAML() + inp = dedent( + """\ + - d + - a # 1 + - c # 3 + - e # 5 + - b # 2 + """ + ) + a = yaml.load(dedent(inp)) + a.sort(reverse=True) + buf = StringIO() + yaml.dump(a, buf) + exp = dedent( + """\ + - e # 5 + - d + - c # 3 + - b # 2 + - a # 1 + """ + ) + assert buf.getvalue() == exp + + def test_issue_221_sort_key(self): + from io import StringIO + + from ruyaml import YAML + + yaml = YAML() + inp = dedent( + """\ + - four + - One # 1 + - Three # 3 + - five # 5 + - two # 2 + """ + ) + a = yaml.load(dedent(inp)) + a.sort(key=str.lower) + buf = StringIO() + yaml.dump(a, buf) + exp = dedent( + """\ + - five # 5 + - four + - One # 1 + - Three # 3 + - two # 2 + """ + ) + assert buf.getvalue() == exp + + def test_issue_221_sort_key_reverse(self): + from io import StringIO + + from ruyaml import YAML + + yaml = YAML() + inp = dedent( + """\ + - four + - One # 1 + - Three # 3 + - five # 5 + - two # 2 + """ + ) + a = yaml.load(dedent(inp)) + a.sort(key=str.lower, reverse=True) + buf = StringIO() + yaml.dump(a, buf) + exp = dedent( + """\ + - two # 2 + - Three # 3 + - One # 1 + - four + - five # 5 + """ + ) + assert buf.getvalue() == exp + + def test_issue_222(self): + from io import StringIO + + import ruyaml + + yaml = ruyaml.YAML(typ='safe') + buf = StringIO() + yaml.dump(['012923'], buf) + assert buf.getvalue() 
== "['012923']\n" + + def test_issue_223(self): + import ruyaml + + yaml = ruyaml.YAML(typ='safe') + yaml.load('phone: 0123456789') + + def test_issue_232(self): + import ruyaml + + yaml = ruyaml.YAML(typ='safe', pure=True) + + with pytest.raises(ruyaml.parser.ParserError): + yaml.load(']') + with pytest.raises(ruyaml.parser.ParserError): + yaml.load('{]') + + def test_issue_233(self): + import json + + from ruyaml import YAML + + yaml = YAML() + data = yaml.load('{}') + json_str = json.dumps(data) # NOQA + + def test_issue_233a(self): + import json + + from ruyaml import YAML + + yaml = YAML() + data = yaml.load('[]') + json_str = json.dumps(data) # NOQA + + def test_issue_234(self): + from ruyaml import YAML + + inp = dedent( + """\ + - key: key1 + ctx: [one, two] + help: one + cmd: > + foo bar + foo bar + """ + ) + yaml = YAML(typ='safe', pure=True) + data = yaml.load(inp) + fold = data[0]['cmd'] + print(repr(fold)) + assert '\a' not in fold + + def test_issue_236(self): + inp = """ + conf: + xx: {a: "b", c: []} + asd: "nn" + """ + d = round_trip(inp, preserve_quotes=True) # NOQA + + def test_issue_238(self, tmpdir): + program_src = r""" + import ruyaml + from io import StringIO + + yaml = ruyaml.YAML(typ='unsafe') + + + class A: + def __setstate__(self, d): + self.__dict__ = d + + + class B: + pass + + + a = A() + b = B() + + a.x = b + b.y = [b] + assert a.x.y[0] == a.x + + buf = StringIO() + yaml.dump(a, buf) + + data = yaml.load(buf.getvalue()) + assert data.x.y[0] == data.x + """ + assert save_and_run(dedent(program_src), tmpdir) == 0 + + def test_issue_239(self): + inp = """ + first_name: Art + occupation: Architect + # I'm safe + about: Art Vandelay is a fictional character that George invents... + # we are not :( + # help me! + --- + # what?! 
+ hello: world + # someone call the Batman + foo: bar # or quz + # Lost again + --- + I: knew + # final words + """ + d = YAML().round_trip_all(inp) # NOQA + + def test_issue_242(self): + from ruyaml.comments import CommentedMap + + d0 = CommentedMap([('a', 'b')]) + assert d0['a'] == 'b' + + def test_issue_245(self): + from ruyaml import YAML + + inp = """ + d: yes + """ + for typ in ['safepure', 'rt', 'safe']: + if typ.endswith('pure'): + pure = True + typ = typ[:-4] + else: + pure = None + + yaml = YAML(typ=typ, pure=pure) + yaml.version = (1, 1) + d = yaml.load(inp) + print(typ, yaml.parser, yaml.resolver) + assert d['d'] is True + + def test_issue_249(self): + yaml = YAML() + inp = dedent( + """\ + # comment + - + - 1 + - 2 + - 3 + """ + ) + exp = dedent( + """\ + # comment + - - 1 + - 2 + - 3 + """ + ) + yaml.round_trip(inp, outp=exp) # NOQA + + def test_issue_250(self): + inp = """ + # 1. + - - 1 + # 2. + - map: 2 + # 3. + - 4 + """ + d = round_trip(inp) # NOQA + + # @pytest.mark.xfail(strict=True, reason='bla bla', raises=AssertionError) + def test_issue_279(self): + from io import StringIO + + from ruyaml import YAML + + yaml = YAML() + yaml.indent(sequence=4, offset=2) + inp = dedent( + """\ + experiments: + - datasets: + # ATLAS EWK + - {dataset: ATLASWZRAP36PB, frac: 1.0} + - {dataset: ATLASZHIGHMASS49FB, frac: 1.0} + """ + ) + a = yaml.load(inp) + buf = StringIO() + yaml.dump(a, buf) + print(buf.getvalue()) + assert buf.getvalue() == inp + + def test_issue_280(self): + from collections import namedtuple + from sys import stdout + + from ruyaml import YAML + from ruyaml.representer import RepresenterError + + T = namedtuple('T', ('a', 'b')) + t = T(1, 2) + yaml = YAML() + with pytest.raises(RepresenterError, match='cannot represent'): + yaml.dump({'t': t}, stdout) + + def test_issue_282(self): + # update from list of tuples caused AttributeError + import ruyaml + + yaml_data = ruyaml.comments.CommentedMap([('a', 'apple'), ('b', 'banana')]) + 
yaml_data.update([('c', 'cantaloupe')]) + yaml_data.update({'d': 'date', 'k': 'kiwi'}) + assert 'c' in yaml_data.keys() + assert 'c' in yaml_data._ok + + def test_issue_284(self): + import ruyaml + + inp = dedent( + """\ + plain key: in-line value + : # Both empty + "quoted key": + - entry + """ + ) + yaml = ruyaml.YAML(typ='rt') + yaml.version = (1, 2) + d = yaml.load(inp) + assert d[None] is None + + yaml = ruyaml.YAML(typ='rt') + yaml.version = (1, 1) + with pytest.raises(ruyaml.parser.ParserError, match='expected '): + d = yaml.load(inp) + + def test_issue_285(self): + from ruyaml import YAML + + yaml = YAML() + inp = dedent( + """\ + %YAML 1.1 + --- + - y + - n + - Y + - N + """ + ) + a = yaml.load(inp) + assert a[0] + assert a[2] + assert not a[1] + assert not a[3] + + def test_issue_286(self): + from io import StringIO + + from ruyaml import YAML + + yaml = YAML() + inp = dedent( + """\ + parent_key: + - sub_key: sub_value + + # xxx""" + ) + a = yaml.load(inp) + a['new_key'] = 'new_value' + buf = StringIO() + yaml.dump(a, buf) + assert buf.getvalue().endswith('xxx\nnew_key: new_value\n') + + def test_issue_288(self): + import sys + from io import StringIO + + from ruyaml import YAML + + yamldoc = dedent( + """\ + --- + # Reusable values + aliases: + # First-element comment + - &firstEntry First entry + # Second-element comment + - &secondEntry Second entry + + # Third-element comment is + # a multi-line value + - &thirdEntry Third entry + + # EOF Comment + """ + ) + + yaml = YAML() + yaml.indent(mapping=2, sequence=4, offset=2) + yaml.explicit_start = True + yaml.preserve_quotes = True + yaml.width = sys.maxsize + data = yaml.load(yamldoc) + buf = StringIO() + yaml.dump(data, buf) + assert buf.getvalue() == yamldoc + + def test_issue_288a(self): + import sys + from io import StringIO + + from ruyaml import YAML + + yamldoc = dedent( + """\ + --- + # Reusable values + aliases: + # First-element comment + - &firstEntry First entry + # Second-element comment + 
- &secondEntry Second entry + + # Third-element comment is + # a multi-line value + - &thirdEntry Third entry + + # EOF Comment + """ + ) + + yaml = YAML() + yaml.indent(mapping=2, sequence=4, offset=2) + yaml.explicit_start = True + yaml.preserve_quotes = True + yaml.width = sys.maxsize + data = yaml.load(yamldoc) + buf = StringIO() + yaml.dump(data, buf) + assert buf.getvalue() == yamldoc + + def test_issue_290(self): + import sys + from io import StringIO + + from ruyaml import YAML + + yamldoc = dedent( + """\ + --- + aliases: + # Folded-element comment + # for a multi-line value + - &FoldedEntry > + THIS IS A + FOLDED, MULTI-LINE + VALUE + + # Literal-element comment + # for a multi-line value + - &literalEntry | + THIS IS A + LITERAL, MULTI-LINE + VALUE + + # Plain-element comment + - &plainEntry Plain entry + """ + ) + + yaml = YAML() + yaml.indent(mapping=2, sequence=4, offset=2) + yaml.explicit_start = True + yaml.preserve_quotes = True + yaml.width = sys.maxsize + data = yaml.load(yamldoc) + buf = StringIO() + yaml.dump(data, buf) + assert buf.getvalue() == yamldoc + + def test_issue_290a(self): + import sys + from io import StringIO + + from ruyaml import YAML + + yamldoc = dedent( + """\ + --- + aliases: + # Folded-element comment + # for a multi-line value + - &FoldedEntry > + THIS IS A + FOLDED, MULTI-LINE + VALUE + + # Literal-element comment + # for a multi-line value + - &literalEntry | + THIS IS A + LITERAL, MULTI-LINE + VALUE + + # Plain-element comment + - &plainEntry Plain entry + """ + ) + + yaml = YAML() + yaml.indent(mapping=2, sequence=4, offset=2) + yaml.explicit_start = True + yaml.preserve_quotes = True + yaml.width = sys.maxsize + data = yaml.load(yamldoc) + buf = StringIO() + yaml.dump(data, buf) + assert buf.getvalue() == yamldoc + + # @pytest.mark.xfail(strict=True, reason='should fail pre 0.15.100', raises=AssertionError) + def test_issue_295(self): + # deepcopy also makes a copy of the start and end mark, and these did not + # have 
any comparison beyond their ID, which of course changed, breaking + # some old merge_comment code + import copy + + inp = dedent( + """ + A: + b: + # comment + - l1 + - l2 + + C: + d: e + f: + # comment2 + - - l31 + - l32 + - l33: '5' + """ + ) + data = round_trip_load(inp) # NOQA + dc = copy.deepcopy(data) + assert round_trip_dump(dc) == inp + + def test_issue_300(self): + from ruyaml import YAML + + inp = dedent( + """ + %YAML 1.2 + %TAG ! tag:example.com,2019/path#fragment + --- + null + """ + ) + YAML().load(inp) + + def test_issue_300a(self): + import ruyaml + + inp = dedent( + """ + %YAML 1.1 + %TAG ! tag:example.com,2019/path#fragment + --- + null + """ + ) + yaml = YAML() + with pytest.raises( + ruyaml.scanner.ScannerError, match='while scanning a directive' + ): + yaml.load(inp) + + def test_issue_304(self): + inp = """ + %YAML 1.2 + %TAG ! tag:example.com,2019: + --- + !foo null + ... + """ + d = na_round_trip(inp) # NOQA + + def test_issue_305(self): + inp = """ + %YAML 1.2 + --- + ! null + ... + """ + d = na_round_trip(inp) # NOQA + + def test_issue_307(self): + inp = """ + %YAML 1.2 + %TAG ! tag:example.com,2019/path# + --- + null + ... 
+ """ + d = na_round_trip(inp) # NOQA + + +# @pytest.mark.xfail(strict=True, reason='bla bla', raises=AssertionError) +# def test_issue_ xxx(self): +# inp = """ +# """ +# d = round_trip(inp) # NOQA diff --git a/_test/test_json_numbers.py b/_test/test_json_numbers.py new file mode 100644 index 0000000..1bfd5ba --- /dev/null +++ b/_test/test_json_numbers.py @@ -0,0 +1,56 @@ +# coding: utf-8 + +import json + +import pytest # NOQA + + +def load(s, typ=float): + import ruyaml + + yaml = ruyaml.YAML() + x = '{"low": %s }' % (s) + print('input: [%s]' % (s), repr(x)) + # just to check it is loadable json + res = json.loads(x) + assert isinstance(res['low'], typ) + ret_val = yaml.load(x) + print(ret_val) + return ret_val['low'] + + +class TestJSONNumbers: + # based on http://stackoverflow.com/a/30462009/1307905 + # yaml number regex: http://yaml.org/spec/1.2/spec.html#id2804092 + # + # -? [1-9] ( \. [0-9]* [1-9] )? ( e [-+] [1-9] [0-9]* )? + # + # which is not a superset of the JSON numbers + def test_json_number_float(self): + for x in ( + y.split('#')[0].strip() + for y in """ + 1.0 # should fail on YAML spec on 1-9 allowed as single digit + -1.0 + 1e-06 + 3.1e-5 + 3.1e+5 + 3.1e5 # should fail on YAML spec: no +- after e + """.splitlines() + ): + if not x: + continue + res = load(x) + assert isinstance(res, float) + + def test_json_number_int(self): + for x in ( + y.split('#')[0].strip() + for y in """ + 42 + """.splitlines() + ): + if not x: + continue + res = load(x, int) + assert isinstance(res, int) diff --git a/_test/test_line_col.py b/_test/test_line_col.py new file mode 100644 index 0000000..6bc5a82 --- /dev/null +++ b/_test/test_line_col.py @@ -0,0 +1,104 @@ +# coding: utf-8 + +import pytest # NOQA + +from .roundtrip import dedent, round_trip, round_trip_dump, round_trip_load # NOQA + + +def load(s): + return round_trip_load(dedent(s)) + + +class TestLineCol: + def test_item_00(self): + data = load( + """ + - a + - e + - [b, d] + - c + """ + ) + assert 
data[2].lc.line == 2 + assert data[2].lc.col == 2 + + def test_item_01(self): + data = load( + """ + - a + - e + - {x: 3} + - c + """ + ) + assert data[2].lc.line == 2 + assert data[2].lc.col == 2 + + def test_item_02(self): + data = load( + """ + - a + - e + - !!set {x, y} + - c + """ + ) + assert data[2].lc.line == 2 + assert data[2].lc.col == 2 + + def test_item_03(self): + data = load( + """ + - a + - e + - !!omap + - x: 1 + - y: 3 + - c + """ + ) + assert data[2].lc.line == 2 + assert data[2].lc.col == 2 + + def test_item_04(self): + data = load( + """ + # testing line and column based on SO + # http://stackoverflow.com/questions/13319067/ + - key1: item 1 + key2: item 2 + - key3: another item 1 + key4: another item 2 + """ + ) + assert data[0].lc.line == 2 + assert data[0].lc.col == 2 + assert data[1].lc.line == 4 + assert data[1].lc.col == 2 + + def test_pos_mapping(self): + data = load( + """ + a: 1 + b: 2 + c: 3 + # comment + klm: 42 + d: 4 + """ + ) + assert data.lc.key('klm') == (4, 0) + assert data.lc.value('klm') == (4, 5) + + def test_pos_sequence(self): + data = load( + """ + - a + - b + - c + # next one! + - klm + - d + """ + ) + assert data.lc.item(3) == (4, 2) diff --git a/_test/test_literal.py b/_test/test_literal.py new file mode 100644 index 0000000..dbd2e2b --- /dev/null +++ b/_test/test_literal.py @@ -0,0 +1,335 @@ +# coding: utf-8 + +import pytest # NOQA + +from .roundtrip import YAML # does an automatic dedent on load + +""" +YAML 1.0 allowed root level literal style without indentation: + "Usually top level nodes are not indented" (example 4.21 in 4.6.3) +YAML 1.1 is a bit vague but says: + "Regardless of style, scalar content must always be indented by at least one space" + (4.4.3) + "In general, the document’s node is indented as if it has a parent indented at -1 spaces." + (4.3.3) +YAML 1.2 is again clear about root literal level scalar after directive in example 9.5: + +%YAML 1.2 +--- | +%!PS-Adobe-2.0 +... +%YAML1.2 +--- +# Empty +... 
+""" + + +class TestNoIndent: + def test_root_literal_scalar_indent_example_9_5(self): + yaml = YAML() + s = '%!PS-Adobe-2.0' + inp = """ + --- | + {} + """ + d = yaml.load(inp.format(s)) + print(d) + assert d == s + '\n' + + def test_root_literal_scalar_no_indent(self): + yaml = YAML() + s = 'testing123' + inp = """ + --- | + {} + """ + d = yaml.load(inp.format(s)) + print(d) + assert d == s + '\n' + + def test_root_literal_scalar_no_indent_1_1(self): + yaml = YAML() + s = 'testing123' + inp = """ + %YAML 1.1 + --- | + {} + """ + d = yaml.load(inp.format(s)) + print(d) + assert d == s + '\n' + + def test_root_literal_scalar_no_indent_1_1_old_style(self): + from textwrap import dedent + + from ruyaml import YAML + + yaml = YAML(typ='safe', pure=True) + s = 'testing123' + inp = """ + %YAML 1.1 + --- | + {} + """ + d = yaml.load(dedent(inp.format(s))) + print(d) + assert d == s + '\n' + + def test_root_literal_scalar_no_indent_1_1_no_raise(self): + # from ruyaml.parser import ParserError + + yaml = YAML() + yaml.root_level_block_style_scalar_no_indent_error_1_1 = True + s = 'testing123' + # with pytest.raises(ParserError): + if True: + inp = """ + %YAML 1.1 + --- | + {} + """ + yaml.load(inp.format(s)) + + def test_root_literal_scalar_indent_offset_one(self): + yaml = YAML() + s = 'testing123' + inp = """ + --- |1 + {} + """ + d = yaml.load(inp.format(s)) + print(d) + assert d == s + '\n' + + def test_root_literal_scalar_indent_offset_four(self): + yaml = YAML() + s = 'testing123' + inp = """ + --- |4 + {} + """ + d = yaml.load(inp.format(s)) + print(d) + assert d == s + '\n' + + def test_root_literal_scalar_indent_offset_two_leading_space(self): + yaml = YAML() + s = ' testing123' + inp = """ + --- |4 + {s} + {s} + """ + d = yaml.load(inp.format(s=s)) + print(d) + assert d == (s + '\n') * 2 + + def test_root_literal_scalar_no_indent_special(self): + yaml = YAML() + s = '%!PS-Adobe-2.0' + inp = """ + --- | + {} + """ + d = yaml.load(inp.format(s)) + print(d) + assert 
d == s + '\n' + + def test_root_folding_scalar_indent(self): + yaml = YAML() + s = '%!PS-Adobe-2.0' + inp = """ + --- > + {} + """ + d = yaml.load(inp.format(s)) + print(d) + assert d == s + '\n' + + def test_root_folding_scalar_no_indent(self): + yaml = YAML() + s = 'testing123' + inp = """ + --- > + {} + """ + d = yaml.load(inp.format(s)) + print(d) + assert d == s + '\n' + + def test_root_folding_scalar_no_indent_special(self): + yaml = YAML() + s = '%!PS-Adobe-2.0' + inp = """ + --- > + {} + """ + d = yaml.load(inp.format(s)) + print(d) + assert d == s + '\n' + + def test_root_literal_multi_doc(self): + yaml = YAML(typ='safe', pure=True) + s1 = 'abc' + s2 = 'klm' + inp = """ + --- |- + {} + --- | + {} + """ + for idx, d1 in enumerate(yaml.load_all(inp.format(s1, s2))): + print('d1:', d1) + assert ['abc', 'klm\n'][idx] == d1 + + def test_root_literal_doc_indent_directives_end(self): + yaml = YAML() + yaml.explicit_start = True + inp = """ + --- |- + %YAML 1.3 + --- + this: is a test + """ + yaml.round_trip(inp) + + def test_root_literal_doc_indent_document_end(self): + yaml = YAML() + yaml.explicit_start = True + inp = """ + --- |- + some more + ... 
+ text + """ + yaml.round_trip(inp) + + def test_root_literal_doc_indent_marker(self): + yaml = YAML() + yaml.explicit_start = True + inp = """ + --- |2 + some more + text + """ + d = yaml.load(inp) + print(type(d), repr(d)) + yaml.round_trip(inp) + + def test_nested_literal_doc_indent_marker(self): + yaml = YAML() + yaml.explicit_start = True + inp = """ + --- + a: |2 + some more + text + """ + d = yaml.load(inp) + print(type(d), repr(d)) + yaml.round_trip(inp) + + +class Test_RoundTripLiteral: + def test_rt_root_literal_scalar_no_indent(self): + yaml = YAML() + yaml.explicit_start = True + s = 'testing123' + ys = """ + --- | + {} + """ + ys = ys.format(s) + d = yaml.load(ys) + yaml.dump(d, compare=ys) + + def test_rt_root_literal_scalar_indent(self): + yaml = YAML() + yaml.explicit_start = True + yaml.indent = 4 + s = 'testing123' + ys = """ + --- | + {} + """ + ys = ys.format(s) + d = yaml.load(ys) + yaml.dump(d, compare=ys) + + def test_rt_root_plain_scalar_no_indent(self): + yaml = YAML() + yaml.explicit_start = True + yaml.indent = 0 + s = 'testing123' + ys = """ + --- + {} + """ + ys = ys.format(s) + d = yaml.load(ys) + yaml.dump(d, compare=ys) + + def test_rt_root_plain_scalar_expl_indent(self): + yaml = YAML() + yaml.explicit_start = True + yaml.indent = 4 + s = 'testing123' + ys = """ + --- + {} + """ + ys = ys.format(s) + d = yaml.load(ys) + yaml.dump(d, compare=ys) + + def test_rt_root_sq_scalar_expl_indent(self): + yaml = YAML() + yaml.explicit_start = True + yaml.indent = 4 + s = "'testing: 123'" + ys = """ + --- + {} + """ + ys = ys.format(s) + d = yaml.load(ys) + yaml.dump(d, compare=ys) + + def test_rt_root_dq_scalar_expl_indent(self): + # if yaml.indent is the default (None) + # then write after the directive indicator + yaml = YAML() + yaml.explicit_start = True + yaml.indent = 0 + s = '"\'testing123"' + ys = """ + --- + {} + """ + ys = ys.format(s) + d = yaml.load(ys) + yaml.dump(d, compare=ys) + + def 
test_rt_root_literal_scalar_no_indent_no_eol(self): + yaml = YAML() + yaml.explicit_start = True + s = 'testing123' + ys = """ + --- |- + {} + """ + ys = ys.format(s) + d = yaml.load(ys) + yaml.dump(d, compare=ys) + + def test_rt_non_root_literal_scalar(self): + yaml = YAML() + s = 'testing123' + ys = """ + - | + {} + """ + ys = ys.format(s) + d = yaml.load(ys) + yaml.dump(d, compare=ys) diff --git a/_test/test_none.py b/_test/test_none.py new file mode 100644 index 0000000..fa81c10 --- /dev/null +++ b/_test/test_none.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +import pytest # NOQA + +from .roundtrip import round_trip_dump, round_trip_load + + +class TestNone: + def test_dump00(self): + data = None + s = round_trip_dump(data) + assert s == 'null\n...\n' + d = round_trip_load(s) + assert d == data + + def test_dump01(self): + data = None + s = round_trip_dump(data, explicit_end=True) + assert s == 'null\n...\n' + d = round_trip_load(s) + assert d == data + + def test_dump02(self): + data = None + s = round_trip_dump(data, explicit_end=False) + assert s == 'null\n...\n' + d = round_trip_load(s) + assert d == data + + def test_dump03(self): + data = None + s = round_trip_dump(data, explicit_start=True) + assert s == '---\n...\n' + d = round_trip_load(s) + assert d == data + + def test_dump04(self): + data = None + s = round_trip_dump(data, explicit_start=True, explicit_end=False) + assert s == '---\n...\n' + d = round_trip_load(s) + assert d == data diff --git a/_test/test_numpy.py b/_test/test_numpy.py new file mode 100644 index 0000000..4590625 --- /dev/null +++ b/_test/test_numpy.py @@ -0,0 +1,22 @@ +# coding: utf-8 + +try: + import numpy +except: # NOQA + numpy = None + + +def Xtest_numpy(): + import ruyaml + + if numpy is None: + return + data = numpy.arange(10) + print('data', type(data), data) + + yaml_str = ruyaml.dump(data) + datb = ruyaml.load(yaml_str) + print('datb', type(datb), datb) + + print('\nYAML', yaml_str) + assert data == datb diff --git 
a/_test/test_program_config.py b/_test/test_program_config.py new file mode 100644 index 0000000..d633f72 --- /dev/null +++ b/_test/test_program_config.py @@ -0,0 +1,65 @@ +# coding: utf-8 + +import pytest # NOQA + +# import ruyaml +from .roundtrip import round_trip + + +class TestProgramConfig: + def test_application_arguments(self): + # application configur + round_trip( + """ + args: + username: anthon + passwd: secret + fullname: Anthon van der Neut + tmux: + session-name: test + loop: + wait: 10 + """ + ) + + def test_single(self): + # application configuration + round_trip( + """ + # default arguments for the program + args: # needed to prevent comment wrapping + # this should be your username + username: anthon + passwd: secret # this is plaintext don't reuse \ +# important/system passwords + fullname: Anthon van der Neut + tmux: + session-name: test # make sure this doesn't clash with + # other sessions + loop: # looping related defaults + # experiment with the following + wait: 10 + # no more argument info to pass + """ + ) + + def test_multi(self): + # application configuration + round_trip( + """ + # default arguments for the program + args: # needed to prevent comment wrapping + # this should be your username + username: anthon + passwd: secret # this is plaintext don't reuse + # important/system passwords + fullname: Anthon van der Neut + tmux: + session-name: test # make sure this doesn't clash with + # other sessions + loop: # looping related defaults + # experiment with the following + wait: 10 + # no more argument info to pass + """ + ) diff --git a/_test/test_spec_examples.py b/_test/test_spec_examples.py new file mode 100644 index 0000000..3a1725c --- /dev/null +++ b/_test/test_spec_examples.py @@ -0,0 +1,337 @@ +# coding: utf-8 + +import pytest # NOQA + +from .roundtrip import YAML + + +def test_example_2_1(): + yaml = YAML() + yaml.round_trip( + """ + - Mark McGwire + - Sammy Sosa + - Ken Griffey + """ + ) + + +@pytest.mark.xfail(strict=True) 
+def test_example_2_2(): + yaml = YAML() + yaml.mapping_value_align = True + yaml.round_trip( + """ + hr: 65 # Home runs + avg: 0.278 # Batting average + rbi: 147 # Runs Batted In + """ + ) + + +def test_example_2_3(): + yaml = YAML() + yaml.indent(sequence=4, offset=2) + yaml.round_trip( + """ + american: + - Boston Red Sox + - Detroit Tigers + - New York Yankees + national: + - New York Mets + - Chicago Cubs + - Atlanta Braves + """ + ) + + +@pytest.mark.xfail(strict=True) +def test_example_2_4(): + yaml = YAML() + yaml.mapping_value_align = True + yaml.round_trip( + """ + - + name: Mark McGwire + hr: 65 + avg: 0.278 + - + name: Sammy Sosa + hr: 63 + avg: 0.288 + """ + ) + + +@pytest.mark.xfail(strict=True) +def test_example_2_5(): + yaml = YAML() + yaml.flow_sequence_element_align = True + yaml.round_trip( + """ + - [name , hr, avg ] + - [Mark McGwire, 65, 0.278] + - [Sammy Sosa , 63, 0.288] + """ + ) + + +@pytest.mark.xfail(strict=True) +def test_example_2_6(): + yaml = YAML() + # yaml.flow_mapping_final_comma = False + yaml.flow_mapping_one_element_per_line = True + yaml.round_trip( + """ + Mark McGwire: {hr: 65, avg: 0.278} + Sammy Sosa: { + hr: 63, + avg: 0.288 + } + """ + ) + + +@pytest.mark.xfail(strict=True) +def test_example_2_7(): + yaml = YAML() + yaml.round_trip_all( + """ + # Ranking of 1998 home runs + --- + - Mark McGwire + - Sammy Sosa + - Ken Griffey + + # Team ranking + --- + - Chicago Cubs + - St Louis Cardinals + """ + ) + + +def test_example_2_8(): + yaml = YAML() + yaml.explicit_start = True + yaml.explicit_end = True + yaml.round_trip_all( + """ + --- + time: 20:03:20 + player: Sammy Sosa + action: strike (miss) + ... + --- + time: 20:03:47 + player: Sammy Sosa + action: grand slam + ... 
+ """ + ) + + +def test_example_2_9(): + yaml = YAML() + yaml.explicit_start = True + yaml.indent(sequence=4, offset=2) + yaml.round_trip( + """ + --- + hr: # 1998 hr ranking + - Mark McGwire + - Sammy Sosa + rbi: + # 1998 rbi ranking + - Sammy Sosa + - Ken Griffey + """ + ) + + +@pytest.mark.xfail(strict=True) +def test_example_2_10(): + yaml = YAML() + yaml.explicit_start = True + yaml.indent(sequence=4, offset=2) + yaml.round_trip( + """ + --- + hr: + - Mark McGwire + # Following node labeled SS + - &SS Sammy Sosa + rbi: + - *SS # Subsequent occurrence + - Ken Griffey + """ + ) + + +@pytest.mark.xfail(strict=True) +def test_example_2_11(): + yaml = YAML() + yaml.round_trip( + """ + ? - Detroit Tigers + - Chicago cubs + : + - 2001-07-23 + + ? [ New York Yankees, + Atlanta Braves ] + : [ 2001-07-02, 2001-08-12, + 2001-08-14 ] + """ + ) + + +@pytest.mark.xfail(strict=True) +def test_example_2_12(): + yaml = YAML() + yaml.explicit_start = True + yaml.round_trip( + """ + --- + # Products purchased + - item : Super Hoop + quantity: 1 + - item : Basketball + quantity: 4 + - item : Big Shoes + quantity: 1 + """ + ) + + +@pytest.mark.xfail(strict=True) +def test_example_2_13(): + yaml = YAML() + yaml.round_trip( + r""" + # ASCII Art + --- | + \//||\/|| + // || ||__ + """ + ) + + +@pytest.mark.xfail(strict=True) +def test_example_2_14(): + yaml = YAML() + yaml.explicit_start = True + yaml.indent(root_scalar=2) # needs to be added + yaml.round_trip( + """ + --- > + Mark McGwire's + year was crippled + by a knee injury. + """ + ) + + +@pytest.mark.xfail(strict=True) +def test_example_2_15(): + yaml = YAML() + yaml.round_trip( + """ + > + Sammy Sosa completed another + fine season with great stats. + + 63 Home Runs + 0.288 Batting Average + + What a year! + """ + ) + + +def test_example_2_16(): + yaml = YAML() + yaml.round_trip( + """ + name: Mark McGwire + accomplishment: > + Mark set a major league + home run record in 1998. 
+ stats: | + 65 Home Runs + 0.278 Batting Average + """ + ) + + +@pytest.mark.xfail( + strict=True, reason='cannot YAML dump escape sequences (\n) as hex and normal' +) +def test_example_2_17(): + yaml = YAML() + yaml.allow_unicode = False + yaml.preserve_quotes = True + yaml.round_trip( + r""" + unicode: "Sosa did fine.\u263A" + control: "\b1998\t1999\t2000\n" + hex esc: "\x0d\x0a is \r\n" + + single: '"Howdy!" he cried.' + quoted: ' # Not a ''comment''.' + tie-fighter: '|\-*-/|' + """ + ) + + +@pytest.mark.xfail( + strict=True, reason='non-literal/folding multiline scalars not supported' +) +def test_example_2_18(): + yaml = YAML() + yaml.round_trip( + """ + plain: + This unquoted scalar + spans many lines. + + quoted: "So does this + quoted scalar.\n" + """ + ) + + +@pytest.mark.xfail(strict=True, reason='leading + on decimal dropped') +def test_example_2_19(): + yaml = YAML() + yaml.round_trip( + """ + canonical: 12345 + decimal: +12345 + octal: 0o14 + hexadecimal: 0xC + """ + ) + + +@pytest.mark.xfail(strict=True, reason='case of NaN not preserved') +def test_example_2_20(): + yaml = YAML() + yaml.round_trip( + """ + canonical: 1.23015e+3 + exponential: 12.3015e+02 + fixed: 1230.15 + negative infinity: -.inf + not a number: .NaN + """ + ) + + +def Xtest_example_2_X(): + yaml = YAML() + yaml.round_trip( + """ + """ + ) diff --git a/_test/test_string.py b/_test/test_string.py new file mode 100644 index 0000000..1527e54 --- /dev/null +++ b/_test/test_string.py @@ -0,0 +1,228 @@ +# coding: utf-8 + +""" +various test cases for string scalars in YAML files +'|' for preserved newlines +'>' for folded (newlines become spaces) + +and the chomping modifiers: +'-' for stripping: final line break and any trailing empty lines are excluded +'+' for keeping: final line break and empty lines are preserved +'' for clipping: final line break preserved, empty lines at end not + included in content (no modifier) + +""" + +import platform + +import pytest + +# from ruyaml.compat 
import ordereddict +from .roundtrip import dedent, round_trip, round_trip_dump, round_trip_load # NOQA + + +class TestLiteralScalarString: + def test_basic_string(self): + round_trip( + """ + a: abcdefg + """ + ) + + def test_quoted_integer_string(self): + round_trip( + """ + a: '12345' + """ + ) + + @pytest.mark.skipif( + platform.python_implementation() == 'Jython', + reason='Jython throws RepresenterError', + ) + def test_preserve_string(self): + inp = """ + a: | + abc + def + """ + round_trip(inp, intermediate=dict(a='abc\ndef\n')) + + @pytest.mark.skipif( + platform.python_implementation() == 'Jython', + reason='Jython throws RepresenterError', + ) + def test_preserve_string_strip(self): + s = """ + a: |- + abc + def + + """ + round_trip(s, intermediate=dict(a='abc\ndef')) + + @pytest.mark.skipif( + platform.python_implementation() == 'Jython', + reason='Jython throws RepresenterError', + ) + def test_preserve_string_keep(self): + # with pytest.raises(AssertionError) as excinfo: + inp = """ + a: |+ + ghi + jkl + + + b: x + """ + round_trip(inp, intermediate=dict(a='ghi\njkl\n\n\n', b='x')) + + @pytest.mark.skipif( + platform.python_implementation() == 'Jython', + reason='Jython throws RepresenterError', + ) + def test_preserve_string_keep_at_end(self): + # at EOF you have to specify the ... to get proper "closure" + # of the multiline scalar + inp = """ + a: |+ + ghi + jkl + + ... 
+ """ + round_trip(inp, intermediate=dict(a='ghi\njkl\n\n')) + + def test_fold_string(self): + inp = """ + a: > + abc + def + + """ + round_trip(inp) + + def test_fold_string_strip(self): + inp = """ + a: >- + abc + def + + """ + round_trip(inp) + + def test_fold_string_keep(self): + with pytest.raises(AssertionError) as excinfo: # NOQA + inp = """ + a: >+ + abc + def + + """ + round_trip(inp, intermediate=dict(a='abc def\n\n')) + + +class TestQuotedScalarString: + def test_single_quoted_string(self): + inp = """ + a: 'abc' + """ + round_trip(inp, preserve_quotes=True) + + def test_double_quoted_string(self): + inp = """ + a: "abc" + """ + round_trip(inp, preserve_quotes=True) + + def test_non_preserved_double_quoted_string(self): + inp = """ + a: "abc" + """ + exp = """ + a: abc + """ + round_trip(inp, outp=exp) + + +class TestReplace: + """inspired by issue 110 from sandres23""" + + def test_replace_preserved_scalar_string(self): + import ruyaml + + s = dedent( + """\ + foo: | + foo + foo + bar + foo + """ + ) + data = round_trip_load(s, preserve_quotes=True) + so = data['foo'].replace('foo', 'bar', 2) + assert isinstance(so, ruyaml.scalarstring.LiteralScalarString) + assert so == dedent( + """ + bar + bar + bar + foo + """ + ) + + def test_replace_double_quoted_scalar_string(self): + import ruyaml + + s = dedent( + """\ + foo: "foo foo bar foo" + """ + ) + data = round_trip_load(s, preserve_quotes=True) + so = data['foo'].replace('foo', 'bar', 2) + assert isinstance(so, ruyaml.scalarstring.DoubleQuotedScalarString) + assert so == 'bar bar bar foo' + + +class TestWalkTree: + def test_basic(self): + from ruyaml.comments import CommentedMap + from ruyaml.scalarstring import walk_tree + + data = CommentedMap() + data[1] = 'a' + data[2] = 'with\nnewline\n' + walk_tree(data) + exp = """\ + 1: a + 2: | + with + newline + """ + assert round_trip_dump(data) == dedent(exp) + + def test_map(self): + from ruyaml.comments import CommentedMap + from ruyaml.compat import 
ordereddict + from ruyaml.scalarstring import DoubleQuotedScalarString as dq + from ruyaml.scalarstring import SingleQuotedScalarString as sq + from ruyaml.scalarstring import preserve_literal, walk_tree + + data = CommentedMap() + data[1] = 'a' + data[2] = 'with\nnew : line\n' + data[3] = '${abc}' + data[4] = 'almost:mapping' + m = ordereddict([('\n', preserve_literal), ('${', sq), (':', dq)]) + walk_tree(data, map=m) + exp = """\ + 1: a + 2: | + with + new : line + 3: '${abc}' + 4: "almost:mapping" + """ + assert round_trip_dump(data) == dedent(exp) diff --git a/_test/test_tag.py b/_test/test_tag.py new file mode 100644 index 0000000..8168493 --- /dev/null +++ b/_test/test_tag.py @@ -0,0 +1,171 @@ +# coding: utf-8 + +import pytest # NOQA + +from .roundtrip import YAML, round_trip, round_trip_load + + +def register_xxx(**kw): + import ruyaml as yaml + + class XXX(yaml.comments.CommentedMap): + @staticmethod + def yaml_dump(dumper, data): + return dumper.represent_mapping('!xxx', data) + + @classmethod + def yaml_load(cls, constructor, node): + data = cls() + yield data + constructor.construct_mapping(node, data) + + yaml.add_constructor('!xxx', XXX.yaml_load, constructor=yaml.RoundTripConstructor) + yaml.add_representer(XXX, XXX.yaml_dump, representer=yaml.RoundTripRepresenter) + + +class TestIndentFailures: + def test_tag(self): + round_trip( + """\ + !!python/object:__main__.Developer + name: Anthon + location: Germany + language: python + """ + ) + + def test_full_tag(self): + round_trip( + """\ + !!tag:yaml.org,2002:python/object:__main__.Developer + name: Anthon + location: Germany + language: python + """ + ) + + def test_standard_tag(self): + round_trip( + """\ + !!tag:yaml.org,2002:python/object:map + name: Anthon + location: Germany + language: python + """ + ) + + def test_Y1(self): + round_trip( + """\ + !yyy + name: Anthon + location: Germany + language: python + """ + ) + + def test_Y2(self): + round_trip( + """\ + !!yyy + name: Anthon + location: 
Germany + language: python + """ + ) + + +class TestRoundTripCustom: + def test_X1(self): + register_xxx() + round_trip( + """\ + !xxx + name: Anthon + location: Germany + language: python + """ + ) + + @pytest.mark.xfail(strict=True) + def test_X_pre_tag_comment(self): + register_xxx() + round_trip( + """\ + - + # hello + !xxx + name: Anthon + location: Germany + language: python + """ + ) + + @pytest.mark.xfail(strict=True) + def test_X_post_tag_comment(self): + register_xxx() + round_trip( + """\ + - !xxx + # hello + name: Anthon + location: Germany + language: python + """ + ) + + def test_scalar_00(self): + # https://stackoverflow.com/a/45967047/1307905 + round_trip( + """\ + Outputs: + Vpc: + Value: !Ref: vpc # first tag + Export: + Name: !Sub "${AWS::StackName}-Vpc" # second tag + """ + ) + + +class TestIssue201: + def test_encoded_unicode_tag(self): + round_trip_load( + """ + s: !!python/%75nicode 'abc' + """ + ) + + +class TestImplicitTaggedNodes: + def test_scalar(self): + round_trip( + """\ + - !Scalar abcdefg + """ + ) + + def test_mapping(self): + round_trip( + """\ + - !Mapping {a: 1, b: 2} + """ + ) + + def test_sequence(self): + yaml = YAML() + yaml.brace_single_entry_mapping_in_flow_sequence = True + yaml.mapping_value_align = True + yaml.round_trip( + """ + - !Sequence [a, {b: 1}, {c: {d: 3}}] + """ + ) + + def test_sequence2(self): + yaml = YAML() + yaml.mapping_value_align = True + yaml.round_trip( + """ + - !Sequence [a, b: 1, c: {d: 3}] + """ + ) diff --git a/_test/test_version.py b/_test/test_version.py new file mode 100644 index 0000000..963fa66 --- /dev/null +++ b/_test/test_version.py @@ -0,0 +1,177 @@ +# coding: utf-8 + +import pytest # NOQA + +from .roundtrip import dedent, round_trip, round_trip_load + + +def load(s, version=None): + import ruyaml # NOQA + + yaml = ruyaml.YAML() + yaml.version = version + return yaml.load(dedent(s)) + + +class TestVersions: + def test_explicit_1_2(self): + r = load( + """\ + %YAML 1.2 + --- + - 12:34:56 
+ - 012 + - 012345678 + - 0o12 + - on + - off + - yes + - no + - true + """ + ) + assert r[0] == '12:34:56' + assert r[1] == 12 + assert r[2] == 12345678 + assert r[3] == 10 + assert r[4] == 'on' + assert r[5] == 'off' + assert r[6] == 'yes' + assert r[7] == 'no' + assert r[8] is True + + def test_explicit_1_1(self): + r = load( + """\ + %YAML 1.1 + --- + - 12:34:56 + - 012 + - 012345678 + - 0o12 + - on + - off + - yes + - no + - true + """ + ) + assert r[0] == 45296 + assert r[1] == 10 + assert r[2] == '012345678' + assert r[3] == '0o12' + assert r[4] is True + assert r[5] is False + assert r[6] is True + assert r[7] is False + assert r[8] is True + + def test_implicit_1_2(self): + r = load( + """\ + - 12:34:56 + - 12:34:56.78 + - 012 + - 012345678 + - 0o12 + - on + - off + - yes + - no + - true + """ + ) + assert r[0] == '12:34:56' + assert r[1] == '12:34:56.78' + assert r[2] == 12 + assert r[3] == 12345678 + assert r[4] == 10 + assert r[5] == 'on' + assert r[6] == 'off' + assert r[7] == 'yes' + assert r[8] == 'no' + assert r[9] is True + + def test_load_version_1_1(self): + inp = """\ + - 12:34:56 + - 12:34:56.78 + - 012 + - 012345678 + - 0o12 + - on + - off + - yes + - no + - true + """ + r = load(inp, version='1.1') + assert r[0] == 45296 + assert r[1] == 45296.78 + assert r[2] == 10 + assert r[3] == '012345678' + assert r[4] == '0o12' + assert r[5] is True + assert r[6] is False + assert r[7] is True + assert r[8] is False + assert r[9] is True + + +class TestIssue62: + # bitbucket issue 62, issue_62 + def test_00(self): + import ruyaml # NOQA + + s = dedent( + """\ + {}# Outside flow collection: + - ::vector + - ": - ()" + - Up, up, and away! 
+ - -123 + - http://example.com/foo#bar + # Inside flow collection: + - [::vector, ": - ()", "Down, down and away!", -456, http://example.com/foo#bar] + """ + ) + with pytest.raises(ruyaml.parser.ParserError): + round_trip(s.format('%YAML 1.1\n---\n'), preserve_quotes=True) + round_trip(s.format(""), preserve_quotes=True) + + def test_00_single_comment(self): + import ruyaml # NOQA + + s = dedent( + """\ + {}# Outside flow collection: + - ::vector + - ": - ()" + - Up, up, and away! + - -123 + - http://example.com/foo#bar + - [::vector, ": - ()", "Down, down and away!", -456, http://example.com/foo#bar] + """ + ) + with pytest.raises(ruyaml.parser.ParserError): + round_trip(s.format('%YAML 1.1\n---\n'), preserve_quotes=True) + round_trip(s.format(""), preserve_quotes=True) + # round_trip(s.format('%YAML 1.2\n---\n'), preserve_quotes=True, version=(1, 2)) + + def test_01(self): + import ruyaml # NOQA + + s = dedent( + """\ + {}[random plain value that contains a ? character] + """ + ) + with pytest.raises(ruyaml.parser.ParserError): + round_trip(s.format('%YAML 1.1\n---\n'), preserve_quotes=True) + round_trip(s.format(""), preserve_quotes=True) + # note the flow seq on the --- line! 
+ round_trip(s.format('%YAML 1.2\n--- '), preserve_quotes=True, version='1.2') + + def test_so_45681626(self): + # was not properly parsing + round_trip_load('{"in":{},"out":{}}') diff --git a/_test/test_yamlfile.py b/_test/test_yamlfile.py new file mode 100644 index 0000000..149478a --- /dev/null +++ b/_test/test_yamlfile.py @@ -0,0 +1,229 @@ +# coding: utf-8 + +""" +various test cases for YAML files +""" + +import io +import platform + +import pytest # NOQA + +from .roundtrip import dedent, round_trip, round_trip_dump, round_trip_load # NOQA + + +class TestYAML: + def test_backslash(self): + round_trip( + """ + handlers: + static_files: applications/\\1/static/\\2 + """ + ) + + def test_omap_out(self): + # ordereddict mapped to !!omap + import ruyaml # NOQA + from ruyaml.compat import ordereddict + + x = ordereddict([('a', 1), ('b', 2)]) + res = round_trip_dump(x, default_flow_style=False) + assert res == dedent( + """ + !!omap + - a: 1 + - b: 2 + """ + ) + + def test_omap_roundtrip(self): + round_trip( + """ + !!omap + - a: 1 + - b: 2 + - c: 3 + - d: 4 + """ + ) + + def test_dump_collections_ordereddict(self): + from collections import OrderedDict + + import ruyaml # NOQA + + # OrderedDict mapped to !!omap + x = OrderedDict([('a', 1), ('b', 2)]) + res = round_trip_dump(x, default_flow_style=False) + assert res == dedent( + """ + !!omap + - a: 1 + - b: 2 + """ + ) + + def test_CommentedSet(self): + from ruyaml.constructor import CommentedSet + + s = CommentedSet(['a', 'b', 'c']) + s.remove('b') + s.add('d') + assert s == CommentedSet(['a', 'c', 'd']) + s.add('e') + s.add('f') + s.remove('e') + assert s == CommentedSet(['a', 'c', 'd', 'f']) + + def test_set_out(self): + # preferable would be the shorter format without the ': null' + import ruyaml # NOQA + + x = set(['a', 'b', 'c']) + # cannot use round_trip_dump, it doesn't show null in block style + buf = io.StringIO() + yaml = ruyaml.YAML(typ='unsafe', pure=True) + yaml.default_flow_style = False + yaml.dump(x, 
buf) + assert buf.getvalue() == dedent( + """ + !!set + a: null + b: null + c: null + """ + ) + + # ordering is not preserved in a set + def test_set_compact(self): + # this format is read and also should be written by default + round_trip( + """ + !!set + ? a + ? b + ? c + """ + ) + + def test_blank_line_after_comment(self): + round_trip( + """ + # Comment with spaces after it. + + + a: 1 + """ + ) + + def test_blank_line_between_seq_items(self): + round_trip( + """ + # Seq with empty lines in between items. + b: + - bar + + + - baz + """ + ) + + @pytest.mark.skipif( + platform.python_implementation() == 'Jython', + reason='Jython throws RepresenterError', + ) + def test_blank_line_after_literal_chip(self): + s = """ + c: + - | + This item + has a blank line + following it. + + - | + To visually separate it from this item. + + This item contains a blank line. + + + """ + d = round_trip_load(dedent(s)) + print(d) + round_trip(s) + assert d['c'][0].split('it.')[1] == '\n' + assert d['c'][1].split('line.')[1] == '\n' + + @pytest.mark.skipif( + platform.python_implementation() == 'Jython', + reason='Jython throws RepresenterError', + ) + def test_blank_line_after_literal_keep(self): + """have to insert an eof marker in YAML to test this""" + s = """ + c: + - |+ + This item + has a blank line + following it. + + - |+ + To visually separate it from this item. + + This item contains a blank line. + + + ... + """ + d = round_trip_load(dedent(s)) + print(d) + round_trip(s) + assert d['c'][0].split('it.')[1] == '\n\n' + assert d['c'][1].split('line.')[1] == '\n\n\n' + + @pytest.mark.skipif( + platform.python_implementation() == 'Jython', + reason='Jython throws RepresenterError', + ) + def test_blank_line_after_literal_strip(self): + s = """ + c: + - |- + This item + has a blank line + following it. + + - |- + To visually separate it from this item. + + This item contains a blank line. 
+ + + """ + d = round_trip_load(dedent(s)) + print(d) + round_trip(s) + assert d['c'][0].split('it.')[1] == "" + assert d['c'][1].split('line.')[1] == "" + + def test_load_all_perserve_quotes(self): + import ruyaml # NOQA + + yaml = ruyaml.YAML() + yaml.preserve_quotes = True + s = dedent( + """\ + a: 'hello' + --- + b: "goodbye" + """ + ) + data = [] + for x in yaml.load_all(s): + data.append(x) + buf = ruyaml.compat.StringIO() + yaml.dump_all(data, buf) + out = buf.getvalue() + print(type(data[0]['a']), data[0]['a']) + # out = ruyaml.round_trip_dump_all(data) + print(out) + assert out == s diff --git a/_test/test_yamlobject.py b/_test/test_yamlobject.py new file mode 100644 index 0000000..0f9c48c --- /dev/null +++ b/_test/test_yamlobject.py @@ -0,0 +1,82 @@ +# coding: utf-8 + +import pytest # NOQA + +from .roundtrip import save_and_run # NOQA + + +def test_monster(tmpdir): + program_src = '''\ + import ruyaml + from textwrap import dedent + + class Monster(ruyaml.YAMLObject): + yaml_tag = '!Monster' + + def __init__(self, name, hp, ac, attacks): + self.name = name + self.hp = hp + self.ac = ac + self.attacks = attacks + + def __repr__(self): + return "%s(name=%r, hp=%r, ac=%r, attacks=%r)" % ( + self.__class__.__name__, self.name, self.hp, self.ac, self.attacks) + + data = ruyaml.load(dedent("""\\ + --- !Monster + name: Cave spider + hp: [2,6] # 2d6 + ac: 16 + attacks: [BITE, HURT] + """), Loader=ruyaml.Loader) + # normal dump, keys will be sorted + assert ruyaml.dump(data) == dedent("""\\ + !Monster + ac: 16 + attacks: [BITE, HURT] + hp: [2, 6] + name: Cave spider + """) + ''' + assert save_and_run(program_src, tmpdir) == 0 + + +def test_qualified_name00(tmpdir): + """issue 214""" + program_src = """\ + import ruyaml + from io import StringIO + + class A: + def f(self): + pass + + yaml = ruyaml.YAML(typ='unsafe', pure=True) + yaml.explicit_end = True + buf = StringIO() + yaml.dump(A.f, buf) + res = buf.getvalue() + print('res', repr(res)) + assert res == 
"!!python/name:__main__.A.f ''\\n...\\n" + x = ruyaml.load(res) + assert x == A.f + """ + assert save_and_run(program_src, tmpdir) == 0 + + +def test_qualified_name01(tmpdir): + """issue 214""" + from io import StringIO + + import ruyaml.comments + from ruyaml import YAML + + yaml = YAML(typ='unsafe', pure=True) + yaml.explicit_end = True + buf = StringIO() + yaml.dump(ruyaml.comments.CommentedBase.yaml_anchor, buf) + res = buf.getvalue() + assert res == "!!python/name:ruyaml.comments.CommentedBase.yaml_anchor ''\n...\n" + x = yaml.load(res) + assert x == ruyaml.comments.CommentedBase.yaml_anchor diff --git a/_test/test_z_check_debug_leftovers.py b/_test/test_z_check_debug_leftovers.py new file mode 100644 index 0000000..a446dae --- /dev/null +++ b/_test/test_z_check_debug_leftovers.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +import sys + +import pytest # NOQA + +from .roundtrip import dedent, round_trip_dump, round_trip_load + + +class TestLeftOverDebug: + # idea here is to capture round_trip_output via pytest stdout capture + # if there is are any leftover debug statements they should show up + def test_00(self, capsys): + s = dedent( + """ + a: 1 + b: [] + c: [a, 1] + d: {f: 3.14, g: 42} + """ + ) + d = round_trip_load(s) + round_trip_dump(d, sys.stdout) + out, err = capsys.readouterr() + assert out == s + + def test_01(self, capsys): + s = dedent( + """ + - 1 + - [] + - [a, 1] + - {f: 3.14, g: 42} + - - 123 + """ + ) + d = round_trip_load(s) + round_trip_dump(d, sys.stdout) + out, err = capsys.readouterr() + assert out == s diff --git a/_test/test_z_data.py b/_test/test_z_data.py new file mode 100644 index 0000000..273eddf --- /dev/null +++ b/_test/test_z_data.py @@ -0,0 +1,272 @@ +# coding: utf-8 + +import os +import sys +import warnings # NOQA +from pathlib import Path + +import pytest # NOQA + +from ruyaml.compat import _F + +base_path = Path('data') # that is ruamel.yaml.data + + +class YAMLData: + yaml_tag = '!YAML' + + def __init__(self, s): + self._s = s + + 
# Conversion tables for input. E.g. "" is replaced by "\t" + # fmt: off + special = { + 'SPC': ' ', + 'TAB': '\t', + '---': '---', + '...': '...', + } + # fmt: on + + @property + def value(self): + if hasattr(self, '_p'): + return self._p + assert ' \n' not in self._s + assert '\t\n' not in self._s + self._p = self._s + for k, v in YAMLData.special.items(): + k = '<' + k + '>' + self._p = self._p.replace(k, v) + return self._p + + def test_rewrite(self, s): + assert ' \n' not in s + assert '\t\n' not in s + for k, v in YAMLData.special.items(): + k = '<' + k + '>' + s = s.replace(k, v) + return s + + @classmethod + def from_yaml(cls, constructor, node): + from ruyaml.nodes import MappingNode + + if isinstance(node, MappingNode): + return cls(constructor.construct_mapping(node)) + return cls(node.value) + + +class Python(YAMLData): + yaml_tag = '!Python' + + +class Output(YAMLData): + yaml_tag = '!Output' + + +class Assert(YAMLData): + yaml_tag = '!Assert' + + @property + def value(self): + from collections.abc import Mapping + + if hasattr(self, '_pa'): + return self._pa + if isinstance(self._s, Mapping): + self._s['lines'] = self.test_rewrite(self._s['lines']) + self._pa = self._s + return self._pa + + +def pytest_generate_tests(metafunc): + test_yaml = [] + paths = sorted(base_path.glob('**/*.yaml')) + idlist = [] + for path in paths: + # while developing tests put them in data/debug and run: + # auto -c "pytest _test/test_z_data.py" data/debug/*.yaml *.py _test/*.py + if os.environ.get('RUAMELAUTOTEST') == '1': + if path.parent.stem != 'debug': + continue + elif path.parent.stem == 'debug': + # don't test debug entries for production + continue + stem = path.stem + if stem.startswith('.#'): # skip emacs temporary file + continue + idlist.append(stem) + test_yaml.append([path]) + metafunc.parametrize(['yaml'], test_yaml, ids=idlist, scope='class') + + +class TestYAMLData: + def yaml(self, yaml_version=None): + from ruyaml import YAML + + y = YAML() + 
y.preserve_quotes = True + if yaml_version: + y.version = yaml_version + return y + + def docs(self, path): + from ruyaml import YAML + + tyaml = YAML(typ='safe', pure=True) + tyaml.register_class(YAMLData) + tyaml.register_class(Python) + tyaml.register_class(Output) + tyaml.register_class(Assert) + return list(tyaml.load_all(path)) + + def yaml_load(self, value, yaml_version=None): + yaml = self.yaml(yaml_version=yaml_version) + data = yaml.load(value) + return yaml, data + + def round_trip(self, input, output=None, yaml_version=None): + from io import StringIO + + yaml, data = self.yaml_load(input.value, yaml_version=yaml_version) + buf = StringIO() + yaml.dump(data, buf) + expected = input.value if output is None else output.value + value = buf.getvalue() + assert value == expected + + def load_assert(self, input, confirm, yaml_version=None): + from collections.abc import Mapping + + d = self.yaml_load(input.value, yaml_version=yaml_version)[1] # NOQA + print('confirm.value', confirm.value, type(confirm.value)) + if isinstance(confirm.value, Mapping): + r = range(confirm.value['range']) + lines = confirm.value['lines'].splitlines() + for idx in r: # NOQA + for line in lines: + line = 'assert ' + line + print(line) + exec(line) + else: + for line in confirm.value.splitlines(): + line = 'assert ' + line + print(line) + exec(line) + + def run_python(self, python, data, tmpdir, input=None): + from roundtrip import save_and_run + + if input is not None: + (tmpdir / 'input.yaml').write_text(input.value, encoding='utf-8') + assert save_and_run(python.value, base_dir=tmpdir, output=data.value) == 0 + + def insert_comments(self, data, actions): + """this is to automatically insert based on: + path (a.1.b), + position (before, after, between), and + offset (absolute/relative) + """ + raise NotImplementedError + expected = [] + for line in data.value.splitlines(True): + idx = line.index['?'] + if idx < 0: + expected.append(line) + continue + assert line.lstrip()[0] == '#' 
# it has to be comment line + print(data) + assert ''.join(expected) == data.value + + # this is executed by pytest the methods with names not starting with + # test_ are helper methods + def test_yaml_data(self, yaml, tmpdir): + from collections.abc import Mapping + + idx = 0 + typ = None + yaml_version = None + + docs = self.docs(yaml) + if isinstance(docs[0], Mapping): + d = docs[0] + typ = d.get('type') + yaml_version = d.get('yaml_version') + if 'python' in d: + if not check_python_version(d['python']): + pytest.skip('unsupported version') + idx += 1 + data = output = confirm = python = None + for doc in docs[idx:]: + if isinstance(doc, Output): + output = doc + elif isinstance(doc, Assert): + confirm = doc + elif isinstance(doc, Python): + python = doc + if typ is None: + typ = 'python_run' + elif isinstance(doc, YAMLData): + data = doc + else: + print('no handler for type:', type(doc), repr(doc)) + raise AssertionError() + if typ is None: + if data is not None and output is not None: + typ = 'rt' + elif data is not None and confirm is not None: + typ = 'load_assert' + else: + assert data is not None + typ = 'rt' + print('type:', typ) + if data is not None: + print('data:', data.value, end='') + print('output:', output.value if output is not None else output) + if typ == 'rt': + self.round_trip(data, output, yaml_version=yaml_version) + elif typ == 'python_run': + inp = None if output is None or data is None else data + self.run_python( + python, output if output is not None else data, tmpdir, input=inp + ) + elif typ == 'load_assert': + self.load_assert(data, confirm, yaml_version=yaml_version) + elif typ == 'comment': + actions = [] + self.insert_comments(data, actions) + else: + _F('\n>>>>>> run type unknown: "{typ}" <<<<<<\n') + raise AssertionError() + + +def check_python_version(match, current=None): + """ + version indication, return True if version matches. + match should be something like 3.6+, or [2.7, 3.3] etc. Floats + are converted to strings. 
Single values are made into lists. + """ + if current is None: + current = list(sys.version_info[:3]) + if not isinstance(match, list): + match = [match] + for m in match: + minimal = False + if isinstance(m, float): + m = str(m) + if m.endswith('+'): + minimal = True + m = m[:-1] + # assert m[0].isdigit() + # assert m[-1].isdigit() + m = [int(x) for x in m.split('.')] + current_len = current[: len(m)] + # print(m, current, current_len) + if minimal: + if current_len >= m: + return True + else: + if current_len == m: + return True + return False diff --git a/_test/test_z_olddata.py b/_test/test_z_olddata.py new file mode 100644 index 0000000..f260aad --- /dev/null +++ b/_test/test_z_olddata.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +import os +import sys + +import pytest # NOQA + +sys.path.insert(0, os.path.dirname(__file__) + '/lib') + +import warnings # NOQA + +args = [] + + +def test_data(): + import test_appliance # NOQA + + warnings.simplefilter('ignore', PendingDeprecationWarning) + collections = [] + import test_yaml + + collections.append(test_yaml) + test_appliance.run(collections, args) + + +# @pytest.mark.skipif(not ruyaml.__with_libyaml__, +# reason="no libyaml") + + +def test_data_ext(): + collections = [] + import test_appliance # NOQA + + import ruyaml + + warnings.simplefilter('ignore', ruyaml.error.UnsafeLoaderWarning) + warnings.simplefilter('ignore', PendingDeprecationWarning) + if ruyaml.__with_libyaml__: + import test_yaml_ext + + collections.append(test_yaml_ext) + test_appliance.run(collections, args) diff --git a/lib/ruyaml/__init__.py b/lib/ruyaml/__init__.py new file mode 100644 index 0000000..8d90d23 --- /dev/null +++ b/lib/ruyaml/__init__.py @@ -0,0 +1,58 @@ +# coding: utf-8 + +from __future__ import absolute_import, division, print_function, unicode_literals + +if False: # MYPY + from typing import Any, Dict # NOQA + +_package_data = dict( + full_package_name='ruyaml', + version_info=(0, 16, 7), + __version__='0.16.7', + author='ruyaml 
# attribute name under which anchor information is stored on nodes
anchor_attrib = '_yaml_anchor'


class Anchor:
    """Anchor name and dump policy attached to a YAML node."""

    __slots__ = 'value', 'always_dump'
    attrib = anchor_attrib

    def __init__(self):
        # type: () -> None
        self.value = None  # anchor name (or None when unset)
        self.always_dump = False  # emit anchor even when never referenced

    def __repr__(self):
        # type: () -> Any
        suffix = ', (always dump)' if self.always_dump else ""
        return 'Anchor({!r}{})'.format(self.value, suffix)
# attribute names under which round-trip metadata is stored on nodes
comment_attrib = '_yaml_comment'
format_attrib = '_yaml_format'
line_col_attrib = '_yaml_line_col'
merge_attrib = '_yaml_merge'
tag_attrib = '_yaml_tag'


class Comment:
    """Comment information attached to one container node.

    ``comment`` is the comment on the node itself as ``[post, [pre]]``;
    ``_items`` maps a key (mapping/omap/dict) or index (sequence/list)
    to a list of comment tokens (dict: post_key, pre_key, post_value,
    pre_value; list: pre item, post item); ``_post`` holds
    end-of-document comments.
    """

    # using sys.getsize tested the Comment objects, __slots__ makes them bigger
    # and adding self.end did not matter
    __slots__ = 'comment', '_items', '_post', '_pre'
    attrib = comment_attrib

    def __init__(self, old=True):
        # type: (bool) -> None
        self._pre = None if old else []  # type: ignore
        self.comment = None  # [post, [pre]]
        # map key (mapping/omap/dict) or index (sequence/list) to a list of
        # dict: post_key, pre_key, post_value, pre_value
        # list: pre item, post item
        self._items = {}  # type: Dict[Any, Any]
        # self._start = [] # should not put these on first item
        self._post = []  # type: List[Any] # end of document comments

    def __str__(self):
        # type: () -> str
        if bool(self._post):
            end = ',\n end=' + str(self._post)
        else:
            end = ""
        return 'Comment(comment={0},\n items={1}{2})'.format(
            self.comment, self._items, end
        )

    def _old__repr__(self):
        # type: () -> str
        # representation used for "old style" comments (self._pre is None)
        if bool(self._post):
            end = ',\n end=' + str(self._post)
        else:
            end = ""
        ln = ''  # type: Union[str,int]
        try:
            ln = max([len(str(k)) for k in self._items]) + 1
        except ValueError:
            # no items -> no alignment width
            pass
        it = ' '.join(
            ['{:{}} {}\n'.format(str(k) + ':', ln, v) for k, v in self._items.items()]
        )
        if it:
            it = '\n ' + it + ' '
        return 'Comment(\n start={},\n items={{{}}}{})'.format(self.comment, it, end)

    def __repr__(self):
        # type: () -> str
        if self._pre is None:
            return self._old__repr__()
        if bool(self._post):
            end = ',\n end=' + repr(self._post)
        else:
            end = ""
        try:
            ln = max([len(str(k)) for k in self._items]) + 1
        except ValueError:
            ln = ''  # type: ignore
        it = ' '.join(
            ['{:{}} {}\n'.format(str(k) + ':', ln, v) for k, v in self._items.items()]
        )
        if it:
            it = '\n ' + it + ' '
        return 'Comment(\n pre={},\n items={{{}}}{})'.format(self.pre, it, end)

    @property
    def items(self):
        # type: () -> Any
        return self._items

    @property
    def end(self):
        # type: () -> Any
        return self._post

    @end.setter
    def end(self, value):
        # type: (Any) -> None
        self._post = value

    @property
    def pre(self):
        # type: () -> Any
        return self._pre

    @pre.setter
    def pre(self, value):
        # type: (Any) -> None
        self._pre = value

    def get(self, item, pos):
        # type: (Any, Any) -> Any
        """Return the comment stored for *item* at slot *pos*, or None.

        Bound check uses ``<=``: previously ``len(x) < pos`` let
        ``pos == len(x)`` fall through to ``x[pos]`` and raise
        IndexError instead of returning None (off-by-one).
        """
        x = self._items.get(item)
        if x is None or len(x) <= pos:
            return None
        return x[pos]  # can be None

    def set(self, item, pos, value):
        # type: (Any, Any, Any) -> Any
        """Store *value* for *item* at slot *pos*; the slot list grows
        on demand and the target slot must still be empty."""
        x = self._items.get(item)
        if x is None:
            self._items[item] = x = [None] * (pos + 1)
        else:
            while len(x) <= pos:
                x.append(None)
            assert x[pos] is None
        x[pos] = value

    def __contains__(self, x):
        # type: (Any) -> Any
        # test if a substring is in any of the attached comments
        if self.comment:
            if self.comment[0] and x in self.comment[0].value:
                return True
            if self.comment[1]:
                for c in self.comment[1]:
                    if x in c.value:
                        return True
        for value in self.items.values():
            if not value:
                continue
            for c in value:
                if c and x in c.value:
                    return True
        if self.end:
            for c in self.end:
                if x in c.value:
                    return True
        return False
If that is None as well the + default flow style rules the format down the line, or the type + of the constituent values (simple -> flow, map/list -> block)""" + if self._flow_style is None: + return default + return self._flow_style + + +class LineCol: + """ + line and column information wrt document, values start at zero (0) + """ + + attrib = line_col_attrib + + def __init__(self): + # type: () -> None + self.line = None + self.col = None + self.data = None # type: Optional[Dict[Any, Any]] + + def add_kv_line_col(self, key, data): + # type: (Any, Any) -> None + if self.data is None: + self.data = {} + self.data[key] = data + + def key(self, k): + # type: (Any) -> Any + return self._kv(k, 0, 1) + + def value(self, k): + # type: (Any) -> Any + return self._kv(k, 2, 3) + + def _kv(self, k, x0, x1): + # type: (Any, Any, Any) -> Any + if self.data is None: + return None + data = self.data[k] + return data[x0], data[x1] + + def item(self, idx): + # type: (Any) -> Any + if self.data is None: + return None + return self.data[idx][0], self.data[idx][1] + + def add_idx_line_col(self, key, data): + # type: (Any, Any) -> None + if self.data is None: + self.data = {} + self.data[key] = data + + def __repr__(self): + # type: () -> str + return _F('LineCol({line}, {col})', line=self.line, col=self.col) # type: ignore + + +class Tag: + """store tag information for roundtripping""" + + __slots__ = ('value',) + attrib = tag_attrib + + def __init__(self): + # type: () -> None + self.value = None + + def __repr__(self): + # type: () -> Any + return '{0.__class__.__name__}({0.value!r})'.format(self) + + +class CommentedBase: + @property + def ca(self): + # type: () -> Any + if not hasattr(self, Comment.attrib): + setattr(self, Comment.attrib, Comment()) + return getattr(self, Comment.attrib) + + def yaml_end_comment_extend(self, comment, clear=False): + # type: (Any, bool) -> None + if comment is None: + return + if clear or self.ca.end is None: + self.ca.end = [] + 
self.ca.end.extend(comment) + + def yaml_key_comment_extend(self, key, comment, clear=False): + # type: (Any, Any, bool) -> None + r = self.ca._items.setdefault(key, [None, None, None, None]) + if clear or r[1] is None: + if comment[1] is not None: + assert isinstance(comment[1], list) + r[1] = comment[1] + else: + r[1].extend(comment[0]) + r[0] = comment[0] + + def yaml_value_comment_extend(self, key, comment, clear=False): + # type: (Any, Any, bool) -> None + r = self.ca._items.setdefault(key, [None, None, None, None]) + if clear or r[3] is None: + if comment[1] is not None: + assert isinstance(comment[1], list) + r[3] = comment[1] + else: + r[3].extend(comment[0]) + r[2] = comment[0] + + def yaml_set_start_comment(self, comment, indent=0): + # type: (Any, Any) -> None + """overwrites any preceding comment lines on an object + expects comment to be without `#` and possible have multiple lines + """ + from ruyaml.error import CommentMark + from ruyaml.tokens import CommentToken + + pre_comments = self._yaml_clear_pre_comment() # type: ignore + if comment[-1] == '\n': + comment = comment[:-1] # strip final newline if there + start_mark = CommentMark(indent) + for com in comment.split('\n'): + c = com.strip() + if len(c) > 0 and c[0] != '#': + com = '# ' + com + pre_comments.append(CommentToken(com + '\n', start_mark)) + + def yaml_set_comment_before_after_key( + self, key, before=None, indent=0, after=None, after_indent=None + ): + # type: (Any, Any, Any, Any, Any) -> None + """ + expects comment (before/after) to be without `#` and possible have multiple lines + """ + from ruyaml.error import CommentMark + from ruyaml.tokens import CommentToken + + def comment_token(s, mark): + # type: (Any, Any) -> Any + # handle empty lines as having no comment + return CommentToken(('# ' if s else "") + s + '\n', mark) + + if after_indent is None: + after_indent = indent + 2 + if before and (len(before) > 1) and before[-1] == '\n': + before = before[:-1] # strip final newline 
if there + if after and after[-1] == '\n': + after = after[:-1] # strip final newline if there + start_mark = CommentMark(indent) + c = self.ca.items.setdefault(key, [None, [], None, None]) + if before is not None: + if c[1] is None: + c[1] = [] + if before == '\n': + c[1].append(comment_token("", start_mark)) # type: ignore + else: + for com in before.split('\n'): + c[1].append(comment_token(com, start_mark)) # type: ignore + if after: + start_mark = CommentMark(after_indent) + if c[3] is None: + c[3] = [] + for com in after.split('\n'): + c[3].append(comment_token(com, start_mark)) # type: ignore + + @property + def fa(self): + # type: () -> Any + """format attribute + + set_flow_style()/set_block_style()""" + if not hasattr(self, Format.attrib): + setattr(self, Format.attrib, Format()) + return getattr(self, Format.attrib) + + def yaml_add_eol_comment(self, comment, key=NoComment, column=None): + # type: (Any, Optional[Any], Optional[Any]) -> None + """ + there is a problem as eol comments should start with ' #' + (but at the beginning of the line the space doesn't have to be before + the #. 
The column index is for the # mark + """ + from ruyaml.error import CommentMark + from ruyaml.tokens import CommentToken + + if column is None: + try: + column = self._yaml_get_column(key) + except AttributeError: + column = 0 + if comment[0] != '#': + comment = '# ' + comment + if column is None: + if comment[0] == '#': + comment = ' ' + comment + column = 0 + start_mark = CommentMark(column) + ct = [CommentToken(comment, start_mark), None] + self._yaml_add_eol_comment(ct, key=key) + + @property + def lc(self): + # type: () -> Any + if not hasattr(self, LineCol.attrib): + setattr(self, LineCol.attrib, LineCol()) + return getattr(self, LineCol.attrib) + + def _yaml_set_line_col(self, line, col): + # type: (Any, Any) -> None + self.lc.line = line + self.lc.col = col + + def _yaml_set_kv_line_col(self, key, data): + # type: (Any, Any) -> None + self.lc.add_kv_line_col(key, data) + + def _yaml_set_idx_line_col(self, key, data): + # type: (Any, Any) -> None + self.lc.add_idx_line_col(key, data) + + @property + def anchor(self): + # type: () -> Any + if not hasattr(self, Anchor.attrib): + setattr(self, Anchor.attrib, Anchor()) + return getattr(self, Anchor.attrib) + + def yaml_anchor(self): + # type: () -> Any + if not hasattr(self, Anchor.attrib): + return None + return self.anchor + + def yaml_set_anchor(self, value, always_dump=False): + # type: (Any, bool) -> None + self.anchor.value = value + self.anchor.always_dump = always_dump + + @property + def tag(self): + # type: () -> Any + if not hasattr(self, Tag.attrib): + setattr(self, Tag.attrib, Tag()) + return getattr(self, Tag.attrib) + + def yaml_set_tag(self, value): + # type: (Any) -> None + self.tag.value = value + + def copy_attributes(self, t, memo=None): + # type: (Any, Any) -> None + # fmt: off + for a in [Comment.attrib, Format.attrib, LineCol.attrib, Anchor.attrib, + Tag.attrib, merge_attrib]: + if hasattr(self, a): + if memo is not None: + setattr(t, a, copy.deepcopy(getattr(self, a, memo))) + else: + 
setattr(t, a, getattr(self, a)) + # fmt: on + + def _yaml_add_eol_comment(self, comment, key): + # type: (Any, Any) -> None + raise NotImplementedError + + def _yaml_get_pre_comment(self): + # type: () -> Any + raise NotImplementedError + + def _yaml_clear_pre_comment(self): + # type: () -> Any + raise NotImplementedError + + def _yaml_get_column(self, key): + # type: (Any) -> Any + raise NotImplementedError + + +class CommentedSeq(MutableSliceableSequence, list, CommentedBase): # type: ignore + __slots__ = (Comment.attrib, '_lst') + + def __init__(self, *args, **kw): + # type: (Any, Any) -> None + list.__init__(self, *args, **kw) + + def __getsingleitem__(self, idx): + # type: (Any) -> Any + return list.__getitem__(self, idx) + + def __setsingleitem__(self, idx, value): + # type: (Any, Any) -> None + # try to preserve the scalarstring type if setting an existing key to a new value + if idx < len(self): + if ( + isinstance(value, str) + and not isinstance(value, ScalarString) + and isinstance(self[idx], ScalarString) + ): + value = type(self[idx])(value) + list.__setitem__(self, idx, value) + + def __delsingleitem__(self, idx=None): + # type: (Any) -> Any + list.__delitem__(self, idx) # type: ignore + self.ca.items.pop(idx, None) # might not be there -> default value + for list_index in sorted(self.ca.items): + if list_index < idx: + continue + self.ca.items[list_index - 1] = self.ca.items.pop(list_index) + + def __len__(self): + # type: () -> int + return list.__len__(self) + + def insert(self, idx, val): + # type: (Any, Any) -> None + """the comments after the insertion have to move forward""" + list.insert(self, idx, val) + for list_index in sorted(self.ca.items, reverse=True): + if list_index < idx: + break + self.ca.items[list_index + 1] = self.ca.items.pop(list_index) + + def extend(self, val): + # type: (Any) -> None + list.extend(self, val) + + def __eq__(self, other): + # type: (Any) -> bool + return list.__eq__(self, other) + + def _yaml_add_comment(self, 
comment, key=NoComment): + # type: (Any, Optional[Any]) -> None + if key is not NoComment: + self.yaml_key_comment_extend(key, comment, clear=True) + else: + self.ca.comment = comment + + def _yaml_add_eol_comment(self, comment, key): + # type: (Any, Any) -> None + self._yaml_add_comment(comment, key=key) + + def _yaml_get_columnX(self, key): + # type: (Any) -> Any + return self.ca.items[key][0].start_mark.column + + def _yaml_get_column(self, key): + # type: (Any) -> Any + column = None + sel_idx = None + pre, post = key - 1, key + 1 + if pre in self.ca.items: + sel_idx = pre + elif post in self.ca.items: + sel_idx = post + else: + # self.ca.items is not ordered + for row_idx, _k1 in enumerate(self): + if row_idx >= key: + break + if row_idx not in self.ca.items: + continue + sel_idx = row_idx + if sel_idx is not None: + column = self._yaml_get_columnX(sel_idx) + return column + + def _yaml_get_pre_comment(self): + # type: () -> Any + pre_comments = [] # type: List[Any] + if self.ca.comment is None: + self.ca.comment = [None, pre_comments] + else: + pre_comments = self.ca.comment[1] + return pre_comments + + def _yaml_clear_pre_comment(self): + # type: () -> Any + pre_comments = [] # type: List[Any] + if self.ca.comment is None: + self.ca.comment = [None, pre_comments] + else: + self.ca.comment[1] = pre_comments + return pre_comments + + def __deepcopy__(self, memo): + # type: (Any) -> Any + res = self.__class__() + memo[id(self)] = res + for k in self: + res.append(copy.deepcopy(k, memo)) + self.copy_attributes(res, memo=memo) + return res + + def __add__(self, other): + # type: (Any) -> Any + return list.__add__(self, other) + + def sort(self, key=None, reverse=False): + # type: (Any, bool) -> None + if key is None: + tmp_lst = sorted(zip(self, range(len(self))), reverse=reverse) + list.__init__(self, [x[0] for x in tmp_lst]) + else: + tmp_lst = sorted( + zip(map(key, list.__iter__(self)), range(len(self))), reverse=reverse + ) + list.__init__(self, 
[list.__getitem__(self, x[1]) for x in tmp_lst]) + itm = self.ca.items + self.ca._items = {} + for idx, x in enumerate(tmp_lst): + old_index = x[1] + if old_index in itm: + self.ca.items[idx] = itm[old_index] + + def __repr__(self): + # type: () -> Any + return list.__repr__(self) + + +class CommentedKeySeq(tuple, CommentedBase): # type: ignore + """This primarily exists to be able to roundtrip keys that are sequences""" + + def _yaml_add_comment(self, comment, key=NoComment): + # type: (Any, Optional[Any]) -> None + if key is not NoComment: + self.yaml_key_comment_extend(key, comment) + else: + self.ca.comment = comment + + def _yaml_add_eol_comment(self, comment, key): + # type: (Any, Any) -> None + self._yaml_add_comment(comment, key=key) + + def _yaml_get_columnX(self, key): + # type: (Any) -> Any + return self.ca.items[key][0].start_mark.column + + def _yaml_get_column(self, key): + # type: (Any) -> Any + column = None + sel_idx = None + pre, post = key - 1, key + 1 + if pre in self.ca.items: + sel_idx = pre + elif post in self.ca.items: + sel_idx = post + else: + # self.ca.items is not ordered + for row_idx, _k1 in enumerate(self): + if row_idx >= key: + break + if row_idx not in self.ca.items: + continue + sel_idx = row_idx + if sel_idx is not None: + column = self._yaml_get_columnX(sel_idx) + return column + + def _yaml_get_pre_comment(self): + # type: () -> Any + pre_comments = [] # type: List[Any] + if self.ca.comment is None: + self.ca.comment = [None, pre_comments] + else: + pre_comments = self.ca.comment[1] + return pre_comments + + def _yaml_clear_pre_comment(self): + # type: () -> Any + pre_comments = [] # type: List[Any] + if self.ca.comment is None: + self.ca.comment = [None, pre_comments] + else: + self.ca.comment[1] = pre_comments + return pre_comments + + +class CommentedMapView(Sized): + __slots__ = ('_mapping',) + + def __init__(self, mapping): + # type: (Any) -> None + self._mapping = mapping + + def __len__(self): + # type: () -> int + count 
= len(self._mapping) + return count + + +class CommentedMapKeysView(CommentedMapView, Set): # type: ignore + __slots__ = () + + @classmethod + def _from_iterable(self, it): + # type: (Any) -> Any + return set(it) + + def __contains__(self, key): + # type: (Any) -> Any + return key in self._mapping + + def __iter__(self): + # type: () -> Any # yield from self._mapping # not in pypy + # for x in self._mapping._keys(): + for x in self._mapping: + yield x + + +class CommentedMapItemsView(CommentedMapView, Set): # type: ignore + __slots__ = () + + @classmethod + def _from_iterable(self, it): + # type: (Any) -> Any + return set(it) + + def __contains__(self, item): + # type: (Any) -> Any + key, value = item + try: + v = self._mapping[key] + except KeyError: + return False + else: + return v == value + + def __iter__(self): + # type: () -> Any + for key in self._mapping._keys(): + yield (key, self._mapping[key]) + + +class CommentedMapValuesView(CommentedMapView): + __slots__ = () + + def __contains__(self, value): + # type: (Any) -> Any + for key in self._mapping: + if value == self._mapping[key]: + return True + return False + + def __iter__(self): + # type: () -> Any + for key in self._mapping._keys(): + yield self._mapping[key] + + +class CommentedMap(ordereddict, CommentedBase): + __slots__ = (Comment.attrib, '_ok', '_ref') + + def __init__(self, *args, **kw): + # type: (Any, Any) -> None + self._ok = set() # type: MutableSet[Any] # own keys + self._ref = [] # type: List[CommentedMap] + ordereddict.__init__(self, *args, **kw) + + def _yaml_add_comment(self, comment, key=NoComment, value=NoComment): + # type: (Any, Optional[Any], Optional[Any]) -> None + """values is set to key to indicate a value attachment of comment""" + if key is not NoComment: + self.yaml_key_comment_extend(key, comment) + return + if value is not NoComment: + self.yaml_value_comment_extend(value, comment) + else: + self.ca.comment = comment + + def _yaml_add_eol_comment(self, comment, key): + # 
type: (Any, Any) -> None + """add on the value line, with value specified by the key""" + self._yaml_add_comment(comment, value=key) + + def _yaml_get_columnX(self, key): + # type: (Any) -> Any + return self.ca.items[key][2].start_mark.column + + def _yaml_get_column(self, key): + # type: (Any) -> Any + column = None + sel_idx = None + pre, post, last = None, None, None + for x in self: + if pre is not None and x != key: + post = x + break + if x == key: + pre = last + last = x + if pre in self.ca.items: + sel_idx = pre + elif post in self.ca.items: + sel_idx = post + else: + # self.ca.items is not ordered + for k1 in self: + if k1 >= key: + break + if k1 not in self.ca.items: + continue + sel_idx = k1 + if sel_idx is not None: + column = self._yaml_get_columnX(sel_idx) + return column + + def _yaml_get_pre_comment(self): + # type: () -> Any + pre_comments = [] # type: List[Any] + if self.ca.comment is None: + self.ca.comment = [None, pre_comments] + else: + pre_comments = self.ca.comment[1] + return pre_comments + + def _yaml_clear_pre_comment(self): + # type: () -> Any + pre_comments = [] # type: List[Any] + if self.ca.comment is None: + self.ca.comment = [None, pre_comments] + else: + self.ca.comment[1] = pre_comments + return pre_comments + + def update(self, *vals, **kw): + # type: (Any, Any) -> None + try: + ordereddict.update(self, *vals, **kw) + except TypeError: + # probably a dict that is used + for x in vals[0]: + self[x] = vals[0][x] + if vals: + try: + self._ok.update(vals[0].keys()) # type: ignore + except AttributeError: + # assume one argument that is a list/tuple of two element lists/tuples + for x in vals[0]: + self._ok.add(x[0]) + if kw: + self._ok.add(*kw.keys()) + + def insert(self, pos, key, value, comment=None): + # type: (Any, Any, Any, Optional[Any]) -> None + """insert key value into given position + attach comment if provided + """ + keys = list(self.keys()) + [key] + ordereddict.insert(self, pos, key, value) + for keytmp in keys: + 
self._ok.add(keytmp) + for referer in self._ref: + for keytmp in keys: + referer.update_key_value(keytmp) + if comment is not None: + self.yaml_add_eol_comment(comment, key=key) + + def mlget(self, key, default=None, list_ok=False): + # type: (Any, Any, Any) -> Any + """multi-level get that expects dicts within dicts""" + if not isinstance(key, list): + return self.get(key, default) + # assume that the key is a list of recursively accessible dicts + + def get_one_level(key_list, level, d): + # type: (Any, Any, Any) -> Any + if not list_ok: + assert isinstance(d, dict) + if level >= len(key_list): + if level > len(key_list): + raise IndexError + return d[key_list[level - 1]] + return get_one_level(key_list, level + 1, d[key_list[level - 1]]) + + try: + return get_one_level(key, 1, self) + except KeyError: + return default + except (TypeError, IndexError): + if not list_ok: + raise + return default + + def __getitem__(self, key): + # type: (Any) -> Any + try: + return ordereddict.__getitem__(self, key) + except KeyError: + for merged in getattr(self, merge_attrib, []): + if key in merged[1]: + return merged[1][key] + raise + + def __setitem__(self, key, value): + # type: (Any, Any) -> None + # try to preserve the scalarstring type if setting an existing key to a new value + if key in self: + if ( + isinstance(value, str) + and not isinstance(value, ScalarString) + and isinstance(self[key], ScalarString) + ): + value = type(self[key])(value) + ordereddict.__setitem__(self, key, value) + self._ok.add(key) + + def _unmerged_contains(self, key): + # type: (Any) -> Any + if key in self._ok: + return True + return None + + def __contains__(self, key): + # type: (Any) -> bool + return bool(ordereddict.__contains__(self, key)) + + def get(self, key, default=None): + # type: (Any, Any) -> Any + try: + return self.__getitem__(key) + except: # NOQA + return default + + def __repr__(self): + # type: () -> Any + return ordereddict.__repr__(self).replace('CommentedMap', 
'ordereddict') + + def non_merged_items(self): + # type: () -> Any + for x in ordereddict.__iter__(self): + if x in self._ok: + yield x, ordereddict.__getitem__(self, x) + + def __delitem__(self, key): + # type: (Any) -> None + # for merged in getattr(self, merge_attrib, []): + # if key in merged[1]: + # value = merged[1][key] + # break + # else: + # # not found in merged in stuff + # ordereddict.__delitem__(self, key) + # for referer in self._ref: + # referer.update=_key_value(key) + # return + # + # ordereddict.__setitem__(self, key, value) # merge might have different value + # self._ok.discard(key) + self._ok.discard(key) + ordereddict.__delitem__(self, key) + for referer in self._ref: + referer.update_key_value(key) + + def __iter__(self): + # type: () -> Any + for x in ordereddict.__iter__(self): + yield x + + def _keys(self): + # type: () -> Any + for x in ordereddict.__iter__(self): + yield x + + def __len__(self): + # type: () -> int + return int(ordereddict.__len__(self)) + + def __eq__(self, other): + # type: (Any) -> bool + return bool(dict(self) == other) + + def keys(self): + # type: () -> Any + return CommentedMapKeysView(self) + + def values(self): + # type: () -> Any + return CommentedMapValuesView(self) + + def _items(self): + # type: () -> Any + for x in ordereddict.__iter__(self): + yield x, ordereddict.__getitem__(self, x) + + def items(self): + # type: () -> Any + return CommentedMapItemsView(self) + + @property + def merge(self): + # type: () -> Any + if not hasattr(self, merge_attrib): + setattr(self, merge_attrib, []) + return getattr(self, merge_attrib) + + def copy(self): + # type: () -> Any + x = type(self)() # update doesn't work + for k, v in self._items(): + x[k] = v + self.copy_attributes(x) + return x + + def add_referent(self, cm): + # type: (Any) -> None + if cm not in self._ref: + self._ref.append(cm) + + def add_yaml_merge(self, value): + # type: (Any) -> None + for v in value: + v[1].add_referent(self) + for k, v in 
v[1].items(): + if ordereddict.__contains__(self, k): + continue + ordereddict.__setitem__(self, k, v) + self.merge.extend(value) + + def update_key_value(self, key): + # type: (Any) -> None + if key in self._ok: + return + for v in self.merge: + if key in v[1]: + ordereddict.__setitem__(self, key, v[1][key]) + return + ordereddict.__delitem__(self, key) + + def __deepcopy__(self, memo): + # type: (Any) -> Any + res = self.__class__() + memo[id(self)] = res + for k in self: + res[k] = copy.deepcopy(self[k], memo) + self.copy_attributes(res, memo=memo) + return res + + +# based on brownie mappings +@classmethod # type: ignore +def raise_immutable(cls, *args, **kwargs): + # type: (Any, *Any, **Any) -> None + raise TypeError('{} objects are immutable'.format(cls.__name__)) + + +class CommentedKeyMap(CommentedBase, Mapping): # type: ignore + __slots__ = Comment.attrib, '_od' + """This primarily exists to be able to roundtrip keys that are mappings""" + + def __init__(self, *args, **kw): + # type: (Any, Any) -> None + if hasattr(self, '_od'): + raise_immutable(self) + try: + self._od = ordereddict(*args, **kw) + except TypeError: + raise + + __delitem__ = ( + __setitem__ + ) = clear = pop = popitem = setdefault = update = raise_immutable + + # need to implement __getitem__, __iter__ and __len__ + def __getitem__(self, index): + # type: (Any) -> Any + return self._od[index] + + def __iter__(self): + # type: () -> Iterator[Any] + for x in self._od.__iter__(): + yield x + + def __len__(self): + # type: () -> int + return len(self._od) + + def __hash__(self): + # type: () -> Any + return hash(tuple(self.items())) + + def __repr__(self): + # type: () -> Any + if not hasattr(self, merge_attrib): + return self._od.__repr__() + return 'ordereddict(' + repr(list(self._od.items())) + ')' + + @classmethod + def fromkeys(keys, v=None): + # type: (Any, Any) -> Any + return CommentedKeyMap(dict.fromkeys(keys, v)) + + def _yaml_add_comment(self, comment, key=NoComment): + # type: 
(Any, Optional[Any]) -> None + if key is not NoComment: + self.yaml_key_comment_extend(key, comment) + else: + self.ca.comment = comment + + def _yaml_add_eol_comment(self, comment, key): + # type: (Any, Any) -> None + self._yaml_add_comment(comment, key=key) + + def _yaml_get_columnX(self, key): + # type: (Any) -> Any + return self.ca.items[key][0].start_mark.column + + def _yaml_get_column(self, key): + # type: (Any) -> Any + column = None + sel_idx = None + pre, post = key - 1, key + 1 + if pre in self.ca.items: + sel_idx = pre + elif post in self.ca.items: + sel_idx = post + else: + # self.ca.items is not ordered + for row_idx, _k1 in enumerate(self): + if row_idx >= key: + break + if row_idx not in self.ca.items: + continue + sel_idx = row_idx + if sel_idx is not None: + column = self._yaml_get_columnX(sel_idx) + return column + + def _yaml_get_pre_comment(self): + # type: () -> Any + pre_comments = [] # type: List[Any] + if self.ca.comment is None: + self.ca.comment = [None, pre_comments] + else: + self.ca.comment[1] = pre_comments + return pre_comments + + +class CommentedOrderedMap(CommentedMap): + __slots__ = (Comment.attrib,) + + +class CommentedSet(MutableSet, CommentedBase): # type: ignore # NOQA + __slots__ = Comment.attrib, 'odict' + + def __init__(self, values=None): + # type: (Any) -> None + self.odict = ordereddict() + MutableSet.__init__(self) + if values is not None: + self |= values # type: ignore + + def _yaml_add_comment(self, comment, key=NoComment, value=NoComment): + # type: (Any, Optional[Any], Optional[Any]) -> None + """values is set to key to indicate a value attachment of comment""" + if key is not NoComment: + self.yaml_key_comment_extend(key, comment) + return + if value is not NoComment: + self.yaml_value_comment_extend(value, comment) + else: + self.ca.comment = comment + + def _yaml_add_eol_comment(self, comment, key): + # type: (Any, Any) -> None + """add on the value line, with value specified by the key""" + 
self._yaml_add_comment(comment, value=key) + + def add(self, value): + # type: (Any) -> None + """Add an element.""" + self.odict[value] = None + + def discard(self, value): + # type: (Any) -> None + """Remove an element. Do not raise an exception if absent.""" + del self.odict[value] + + def __contains__(self, x): + # type: (Any) -> Any + return x in self.odict + + def __iter__(self): + # type: () -> Any + for x in self.odict: + yield x + + def __len__(self): + # type: () -> int + return len(self.odict) + + def __repr__(self): + # type: () -> str + return 'set({0!r})'.format(self.odict.keys()) + + +class TaggedScalar(CommentedBase): + # the value and style attributes are set during roundtrip construction + def __init__(self, value=None, style=None, tag=None): + # type: (Any, Any, Any) -> None + self.value = value + self.style = style + if tag is not None: + self.yaml_set_tag(tag) + + def __str__(self): + # type: () -> Any + return self.value + + +def dump_comments(d, name="", sep='.', out=sys.stdout): + # type: (Any, str, str, Any) -> None + """ + recursively dump comments, all but the toplevel preceded by the path + in dotted form x.0.a + """ + if isinstance(d, dict) and hasattr(d, 'ca'): + if name: + out.write('{} {}\n'.format(name, type(d))) + out.write('{!r}\n'.format(d.ca)) # type: ignore + for k in d: + dump_comments( + d[k], name=(name + sep + str(k)) if name else k, sep=sep, out=out + ) + elif isinstance(d, list) and hasattr(d, 'ca'): + if name: + out.write('{} {}\n'.format(name, type(d))) + out.write('{!r}\n'.format(d.ca)) # type: ignore + for idx, k in enumerate(d): + dump_comments( + k, name=(name + sep + str(idx)) if name else str(idx), sep=sep, out=out + ) diff --git a/lib/ruyaml/compat.py b/lib/ruyaml/compat.py new file mode 100644 index 0000000..91f4c81 --- /dev/null +++ b/lib/ruyaml/compat.py @@ -0,0 +1,263 @@ +# coding: utf-8 + +# partially from package six by Benjamin Peterson + +import collections.abc +import io +import os +import sys +import 
traceback +from abc import abstractmethod +from typing import Any, Dict, List, Optional, Tuple, Union + +# partially from package six by Benjamin Peterson + + +_DEFAULT_YAML_VERSION = (1, 2) + +try: + from collections import OrderedDict +except ImportError: + from ordereddict import OrderedDict # type: ignore + + # to get the right name import ... as ordereddict doesn't do that + + +class ordereddict(OrderedDict): # type: ignore + if not hasattr(OrderedDict, 'insert'): + + def insert(self, pos, key, value): + # type: (int, Any, Any) -> None + if pos >= len(self): + self[key] = value + return + od = ordereddict() + od.update(self) + for k in od: + del self[k] + for index, old_key in enumerate(od): + if pos == index: + self[key] = value + self[old_key] = od[old_key] + + +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 + + +# replace with f-strings when 3.5 support is dropped +# ft = '42' +# assert _F('abc {ft!r}', ft=ft) == 'abc %r' % ft +# 'abc %r' % ft -> _F('abc {ft!r}' -> f'abc {ft!r}' +def _F(s, *superfluous, **kw): + # type: (Any, Any, Any) -> Any + if superfluous: + raise TypeError + return s.format(**kw) + + +StringIO = io.StringIO +BytesIO = io.BytesIO + +# StreamType = Union[BinaryIO, IO[str], IO[unicode], StringIO] +# StreamType = Union[BinaryIO, IO[str], StringIO] # type: ignore +StreamType = Any + +StreamTextType = StreamType # Union[Text, StreamType] +VersionType = Union[List[int], str, Tuple[int, int]] + +builtins_module = 'builtins' + +UNICODE_SIZE = 4 if sys.maxunicode > 65535 else 2 + +DBG_TOKEN = 1 +DBG_EVENT = 2 +DBG_NODE = 4 + + +_debug = None # type: Optional[int] +if 'RUAMELDEBUG' in os.environ: + _debugx = os.environ.get('RUAMELDEBUG') + if _debugx is None: + _debug = 0 + else: + _debug = int(_debugx) + + +if bool(_debug): + + class ObjectCounter: + def __init__(self): + # type: () -> None + self.map = {} # type: Dict[Any, Any] + + def __call__(self, k): + # type: (Any) -> None + self.map[k] = self.map.get(k, 0) + 1 + + def 
dump(self): + # type: () -> None + for k in sorted(self.map): + sys.stdout.write('{} -> {}'.format(k, self.map[k])) + + object_counter = ObjectCounter() + + +# used from yaml util when testing +def dbg(val=None): + # type: (Any) -> Any + global _debug + if _debug is None: + # set to true or false + _debugx = os.environ.get('YAMLDEBUG') + if _debugx is None: + _debug = 0 + else: + _debug = int(_debugx) + if val is None: + return _debug + return _debug & val + + +class Nprint: + def __init__(self, file_name=None): + # type: (Any) -> None + self._max_print = None # type: Any + self._count = None # type: Any + self._file_name = file_name + + def __call__(self, *args, **kw): + # type: (Any, Any) -> None + if not bool(_debug): + return + out = sys.stdout if self._file_name is None else open(self._file_name, 'a') + dbgprint = print # to fool checking for print statements by dv utility + kw1 = kw.copy() + kw1['file'] = out + dbgprint(*args, **kw1) + out.flush() + if self._max_print is not None: + if self._count is None: + self._count = self._max_print + self._count -= 1 + if self._count == 0: + dbgprint('forced exit\n') + traceback.print_stack() + out.flush() + sys.exit(0) + if self._file_name: + out.close() + + def set_max_print(self, i): + # type: (int) -> None + self._max_print = i + self._count = None + + def fp(self, mode='a'): + # type: (str) -> Any + out = sys.stdout if self._file_name is None else open(self._file_name, mode) + return out + + +nprint = Nprint() +nprintf = Nprint('/var/tmp/ruyaml.log') + +# char checkers following production rules + + +def check_namespace_char(ch): + # type: (Any) -> bool + if '\x21' <= ch <= '\x7E': # ! to ~ + return True + if '\xA0' <= ch <= '\uD7FF': + return True + if ('\uE000' <= ch <= '\uFFFD') and ch != '\uFEFF': # excl. 
byte order mark + return True + if '\U00010000' <= ch <= '\U0010FFFF': + return True + return False + + +def check_anchorname_char(ch): + # type: (Any) -> bool + if ch in ',[]{}': + return False + return check_namespace_char(ch) + + +def version_tnf(t1, t2=None): + # type: (Any, Any) -> Any + """ + return True if ruyaml version_info < t1, None if t2 is specified and bigger else False + """ + from ruyaml import version_info # NOQA + + if version_info < t1: + return True + if t2 is not None and version_info < t2: + return None + return False + + +class MutableSliceableSequence(collections.abc.MutableSequence): # type: ignore + __slots__ = () + + def __getitem__(self, index): + # type: (Any) -> Any + if not isinstance(index, slice): + return self.__getsingleitem__(index) + return type(self)([self[i] for i in range(*index.indices(len(self)))]) # type: ignore + + def __setitem__(self, index, value): + # type: (Any, Any) -> None + if not isinstance(index, slice): + return self.__setsingleitem__(index, value) + assert iter(value) + # nprint(index.start, index.stop, index.step, index.indices(len(self))) + if index.step is None: + del self[index.start : index.stop] + for elem in reversed(value): + self.insert(0 if index.start is None else index.start, elem) + else: + range_parms = index.indices(len(self)) + nr_assigned_items = (range_parms[1] - range_parms[0] - 1) // range_parms[ + 2 + ] + 1 + # need to test before changing, in case TypeError is caught + if nr_assigned_items < len(value): + raise TypeError( + 'too many elements in value {} < {}'.format( + nr_assigned_items, len(value) + ) + ) + elif nr_assigned_items > len(value): + raise TypeError( + 'not enough elements in value {} > {}'.format( + nr_assigned_items, len(value) + ) + ) + for idx, i in enumerate(range(*range_parms)): + self[i] = value[idx] + + def __delitem__(self, index): + # type: (Any) -> None + if not isinstance(index, slice): + return self.__delsingleitem__(index) + # nprint(index.start, index.stop, 
index.step, index.indices(len(self))) + for i in reversed(range(*index.indices(len(self)))): + del self[i] + + @abstractmethod + def __getsingleitem__(self, index): + # type: (Any) -> Any + raise IndexError + + @abstractmethod + def __setsingleitem__(self, index, value): + # type: (Any, Any) -> None + raise IndexError + + @abstractmethod + def __delsingleitem__(self, index): + # type: (Any) -> None + raise IndexError diff --git a/lib/ruyaml/composer.py b/lib/ruyaml/composer.py new file mode 100644 index 0000000..336d142 --- /dev/null +++ b/lib/ruyaml/composer.py @@ -0,0 +1,242 @@ +# coding: utf-8 + +import warnings +from typing import Any, Dict + +from ruyaml.compat import _F, nprint, nprintf # NOQA +from ruyaml.error import MarkedYAMLError, ReusedAnchorWarning +from ruyaml.events import ( + AliasEvent, + MappingEndEvent, + MappingStartEvent, + ScalarEvent, + SequenceEndEvent, + SequenceStartEvent, + StreamEndEvent, + StreamStartEvent, +) +from ruyaml.nodes import MappingNode, ScalarNode, SequenceNode + +__all__ = ['Composer', 'ComposerError'] + + +class ComposerError(MarkedYAMLError): + pass + + +class Composer: + def __init__(self, loader=None): + # type: (Any) -> None + self.loader = loader + if self.loader is not None and getattr(self.loader, '_composer', None) is None: + self.loader._composer = self + self.anchors = {} # type: Dict[Any, Any] + + @property + def parser(self): + # type: () -> Any + if hasattr(self.loader, 'typ'): + self.loader.parser # type: ignore + return self.loader._parser # type: ignore + + @property + def resolver(self): + # type: () -> Any + # assert self.loader._resolver is not None + if hasattr(self.loader, 'typ'): + self.loader.resolver # type: ignore + return self.loader._resolver # type: ignore + + def check_node(self): + # type: () -> Any + # Drop the STREAM-START event. + if self.parser.check_event(StreamStartEvent): + self.parser.get_event() + + # If there are more documents available? 
+ return not self.parser.check_event(StreamEndEvent) + + def get_node(self): + # type: () -> Any + # Get the root node of the next document. + if not self.parser.check_event(StreamEndEvent): + return self.compose_document() + + def get_single_node(self): + # type: () -> Any + # Drop the STREAM-START event. + self.parser.get_event() + + # Compose a document if the stream is not empty. + document = None # type: Any + if not self.parser.check_event(StreamEndEvent): + document = self.compose_document() + + # Ensure that the stream contains no more documents. + if not self.parser.check_event(StreamEndEvent): + event = self.parser.get_event() + raise ComposerError( + 'expected a single document in the stream', + document.start_mark, + 'but found another document', + event.start_mark, + ) + + # Drop the STREAM-END event. + self.parser.get_event() + + return document + + def compose_document(self): + # type: (Any) -> Any + # Drop the DOCUMENT-START event. + self.parser.get_event() + + # Compose the root node. + node = self.compose_node(None, None) + + # Drop the DOCUMENT-END event. 
+ self.parser.get_event() + + self.anchors = {} + return node + + def return_alias(self, a): + # type: (Any) -> Any + return a + + def compose_node(self, parent, index): + # type: (Any, Any) -> Any + if self.parser.check_event(AliasEvent): + event = self.parser.get_event() + alias = event.anchor + if alias not in self.anchors: + raise ComposerError( + None, + None, + _F('found undefined alias {alias!r}', alias=alias), + event.start_mark, + ) + return self.return_alias(self.anchors[alias]) + event = self.parser.peek_event() + anchor = event.anchor + if anchor is not None: # have an anchor + if anchor in self.anchors: + # raise ComposerError( + # "found duplicate anchor %r; first occurrence" + # % (anchor), self.anchors[anchor].start_mark, + # "second occurrence", event.start_mark) + ws = ( + '\nfound duplicate anchor {!r}\nfirst occurrence {}\nsecond occurrence ' + '{}'.format( + (anchor), self.anchors[anchor].start_mark, event.start_mark + ) + ) + warnings.warn(ws, ReusedAnchorWarning) + self.resolver.descend_resolver(parent, index) + if self.parser.check_event(ScalarEvent): + node = self.compose_scalar_node(anchor) + elif self.parser.check_event(SequenceStartEvent): + node = self.compose_sequence_node(anchor) + elif self.parser.check_event(MappingStartEvent): + node = self.compose_mapping_node(anchor) + self.resolver.ascend_resolver() + return node + + def compose_scalar_node(self, anchor): + # type: (Any) -> Any + event = self.parser.get_event() + tag = event.tag + if tag is None or tag == '!': + tag = self.resolver.resolve(ScalarNode, event.value, event.implicit) + node = ScalarNode( + tag, + event.value, + event.start_mark, + event.end_mark, + style=event.style, + comment=event.comment, + anchor=anchor, + ) + if anchor is not None: + self.anchors[anchor] = node + return node + + def compose_sequence_node(self, anchor): + # type: (Any) -> Any + start_event = self.parser.get_event() + tag = start_event.tag + if tag is None or tag == '!': + tag = 
self.resolver.resolve(SequenceNode, None, start_event.implicit) + node = SequenceNode( + tag, + [], + start_event.start_mark, + None, + flow_style=start_event.flow_style, + comment=start_event.comment, + anchor=anchor, + ) + if anchor is not None: + self.anchors[anchor] = node + index = 0 + while not self.parser.check_event(SequenceEndEvent): + node.value.append(self.compose_node(node, index)) + index += 1 + end_event = self.parser.get_event() + if node.flow_style is True and end_event.comment is not None: + if node.comment is not None: + nprint( + 'Warning: unexpected end_event commment in sequence ' + 'node {}'.format(node.flow_style) + ) + node.comment = end_event.comment + node.end_mark = end_event.end_mark + self.check_end_doc_comment(end_event, node) + return node + + def compose_mapping_node(self, anchor): + # type: (Any) -> Any + start_event = self.parser.get_event() + tag = start_event.tag + if tag is None or tag == '!': + tag = self.resolver.resolve(MappingNode, None, start_event.implicit) + node = MappingNode( + tag, + [], + start_event.start_mark, + None, + flow_style=start_event.flow_style, + comment=start_event.comment, + anchor=anchor, + ) + if anchor is not None: + self.anchors[anchor] = node + while not self.parser.check_event(MappingEndEvent): + # key_event = self.parser.peek_event() + item_key = self.compose_node(node, None) + # if item_key in node.value: + # raise ComposerError("while composing a mapping", + # start_event.start_mark, + # "found duplicate key", key_event.start_mark) + item_value = self.compose_node(node, item_key) + # node.value[item_key] = item_value + node.value.append((item_key, item_value)) + end_event = self.parser.get_event() + if node.flow_style is True and end_event.comment is not None: + node.comment = end_event.comment + node.end_mark = end_event.end_mark + self.check_end_doc_comment(end_event, node) + return node + + def check_end_doc_comment(self, end_event, node): + # type: (Any, Any) -> None + if end_event.comment 
and end_event.comment[1]: + # pre comments on an end_event, no following to move to + if node.comment is None: + node.comment = [None, None] + assert not isinstance(node, ScalarEvent) + # this is a post comment on a mapping node, add as third element + # in the list + node.comment.append(end_event.comment[1]) + end_event.comment[1] = None diff --git a/lib/ruyaml/configobjwalker.py b/lib/ruyaml/configobjwalker.py new file mode 100644 index 0000000..ca916cc --- /dev/null +++ b/lib/ruyaml/configobjwalker.py @@ -0,0 +1,14 @@ +# coding: utf-8 + +import warnings + +from ruyaml.util import configobj_walker as new_configobj_walker + +if False: # MYPY + from typing import Any # NOQA + + +def configobj_walker(cfg): + # type: (Any) -> Any + warnings.warn('configobj_walker has moved to ruyaml.util, please update your code') + return new_configobj_walker(cfg) diff --git a/lib/ruyaml/constructor.py b/lib/ruyaml/constructor.py new file mode 100644 index 0000000..3931f39 --- /dev/null +++ b/lib/ruyaml/constructor.py @@ -0,0 +1,1920 @@ +# coding: utf-8 + +import base64 +import binascii +import datetime +import sys +import types +import warnings +from collections.abc import Hashable, MutableMapping, MutableSequence # type: ignore + +from ruyaml.comments import * # NOQA +from ruyaml.comments import ( + C_KEY_EOL, + C_KEY_POST, + C_KEY_PRE, + C_VALUE_EOL, + C_VALUE_POST, + C_VALUE_PRE, + CommentedKeyMap, + CommentedKeySeq, + CommentedMap, + CommentedOrderedMap, + CommentedSeq, + CommentedSet, + TaggedScalar, +) +from ruyaml.compat import builtins_module # NOQA +from ruyaml.compat import ordereddict # type: ignore +from ruyaml.compat import _F, nprintf + +# fmt: off +from ruyaml.error import ( + MantissaNoDotYAML1_1Warning, + MarkedYAMLError, + MarkedYAMLFutureWarning, +) +from ruyaml.nodes import * # NOQA +from ruyaml.nodes import MappingNode, ScalarNode, SequenceNode +from ruyaml.scalarbool import ScalarBoolean +from ruyaml.scalarfloat import ScalarFloat +from ruyaml.scalarint import 
BinaryInt, HexCapsInt, HexInt, OctalInt, ScalarInt +from ruyaml.scalarstring import ( + DoubleQuotedScalarString, + FoldedScalarString, + LiteralScalarString, + PlainScalarString, + ScalarString, + SingleQuotedScalarString, +) +from ruyaml.timestamp import TimeStamp +from ruyaml.util import create_timestamp, timestamp_regexp + +if False: # MYPY + from typing import Any, Dict, Generator, List, Optional, Set, Union # NOQA + + +__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor', + 'ConstructorError', 'RoundTripConstructor'] +# fmt: on + + +class ConstructorError(MarkedYAMLError): + pass + + +class DuplicateKeyFutureWarning(MarkedYAMLFutureWarning): + pass + + +class DuplicateKeyError(MarkedYAMLError): + pass + + +class BaseConstructor: + + yaml_constructors = {} # type: Dict[Any, Any] + yaml_multi_constructors = {} # type: Dict[Any, Any] + + def __init__(self, preserve_quotes=None, loader=None): + # type: (Optional[bool], Any) -> None + self.loader = loader + if ( + self.loader is not None + and getattr(self.loader, '_constructor', None) is None + ): + self.loader._constructor = self + self.loader = loader + self.yaml_base_dict_type = dict + self.yaml_base_list_type = list + self.constructed_objects = {} # type: Dict[Any, Any] + self.recursive_objects = {} # type: Dict[Any, Any] + self.state_generators = [] # type: List[Any] + self.deep_construct = False + self._preserve_quotes = preserve_quotes + self.allow_duplicate_keys = False + + @property + def composer(self): + # type: () -> Any + if hasattr(self.loader, 'typ'): + return self.loader.composer # type: ignore + try: + return self.loader._composer # type: ignore + except AttributeError: + sys.stdout.write('slt {}\n'.format(type(self))) + sys.stdout.write('slc {}\n'.format(self.loader._composer)) # type: ignore + sys.stdout.write('{}\n'.format(dir(self))) + raise + + @property + def resolver(self): + # type: () -> Any + if hasattr(self.loader, 'typ'): + return self.loader.resolver # type: ignore + return 
self.loader._resolver # type: ignore + + @property + def scanner(self): + # type: () -> Any + # needed to get to the expanded comments + if hasattr(self.loader, 'typ'): + return self.loader.scanner # type: ignore + return self.loader._scanner # type: ignore + + def check_data(self): + # type: () -> Any + # If there are more documents available? + return self.composer.check_node() + + def get_data(self): + # type: () -> Any + # Construct and return the next document. + if self.composer.check_node(): + return self.construct_document(self.composer.get_node()) + + def get_single_data(self): + # type: () -> Any + # Ensure that the stream contains a single document and construct it. + node = self.composer.get_single_node() + if node is not None: + return self.construct_document(node) + return None + + def construct_document(self, node): + # type: (Any) -> Any + data = self.construct_object(node) + while bool(self.state_generators): + state_generators = self.state_generators + self.state_generators = [] + for generator in state_generators: + for _dummy in generator: + pass + self.constructed_objects = {} + self.recursive_objects = {} + self.deep_construct = False + return data + + def construct_object(self, node, deep=False): + # type: (Any, bool) -> Any + """deep is True when creating an object/mapping recursively, + in that case want the underlying elements available during construction + """ + if node in self.constructed_objects: + return self.constructed_objects[node] + if deep: + old_deep = self.deep_construct + self.deep_construct = True + if node in self.recursive_objects: + return self.recursive_objects[node] + # raise ConstructorError( + # None, None, 'found unconstructable recursive node', node.start_mark + # ) + self.recursive_objects[node] = None + data = self.construct_non_recursive_object(node) + + self.constructed_objects[node] = data + del self.recursive_objects[node] + if deep: + self.deep_construct = old_deep + return data + + def 
construct_non_recursive_object(self, node, tag=None): + # type: (Any, Optional[str]) -> Any + constructor = None # type: Any + tag_suffix = None + if tag is None: + tag = node.tag + if tag in self.yaml_constructors: + constructor = self.yaml_constructors[tag] + else: + for tag_prefix in self.yaml_multi_constructors: + if tag.startswith(tag_prefix): + tag_suffix = tag[len(tag_prefix) :] + constructor = self.yaml_multi_constructors[tag_prefix] + break + else: + if None in self.yaml_multi_constructors: + tag_suffix = tag + constructor = self.yaml_multi_constructors[None] + elif None in self.yaml_constructors: + constructor = self.yaml_constructors[None] + elif isinstance(node, ScalarNode): + constructor = self.__class__.construct_scalar + elif isinstance(node, SequenceNode): + constructor = self.__class__.construct_sequence + elif isinstance(node, MappingNode): + constructor = self.__class__.construct_mapping + if tag_suffix is None: + data = constructor(self, node) + else: + data = constructor(self, tag_suffix, node) + if isinstance(data, types.GeneratorType): + generator = data + data = next(generator) + if self.deep_construct: + for _dummy in generator: + pass + else: + self.state_generators.append(generator) + return data + + def construct_scalar(self, node): + # type: (Any) -> Any + if not isinstance(node, ScalarNode): + raise ConstructorError( + None, + None, + _F('expected a scalar node, but found {node_id!s}', node_id=node.id), + node.start_mark, + ) + return node.value + + def construct_sequence(self, node, deep=False): + # type: (Any, bool) -> Any + """deep is True when creating an object/mapping recursively, + in that case want the underlying elements available during construction + """ + if not isinstance(node, SequenceNode): + raise ConstructorError( + None, + None, + _F('expected a sequence node, but found {node_id!s}', node_id=node.id), + node.start_mark, + ) + return [self.construct_object(child, deep=deep) for child in node.value] + + def 
construct_mapping(self, node, deep=False): + # type: (Any, bool) -> Any + """deep is True when creating an object/mapping recursively, + in that case want the underlying elements available during construction + """ + if not isinstance(node, MappingNode): + raise ConstructorError( + None, + None, + _F('expected a mapping node, but found {node_id!s}', node_id=node.id), + node.start_mark, + ) + total_mapping = self.yaml_base_dict_type() + if getattr(node, 'merge', None) is not None: + todo = [(node.merge, False), (node.value, False)] + else: + todo = [(node.value, True)] + for values, check in todo: + mapping = self.yaml_base_dict_type() # type: Dict[Any, Any] + for key_node, value_node in values: + # keys can be list -> deep + key = self.construct_object(key_node, deep=True) + # lists are not hashable, but tuples are + if not isinstance(key, Hashable): + if isinstance(key, list): + key = tuple(key) + if not isinstance(key, Hashable): + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + 'found unhashable key', + key_node.start_mark, + ) + + value = self.construct_object(value_node, deep=deep) + if check: + if self.check_mapping_key(node, key_node, mapping, key, value): + mapping[key] = value + else: + mapping[key] = value + total_mapping.update(mapping) + return total_mapping + + def check_mapping_key(self, node, key_node, mapping, key, value): + # type: (Any, Any, Any, Any, Any) -> bool + """return True if key is unique""" + if key in mapping: + if not self.allow_duplicate_keys: + mk = mapping.get(key) + args = [ + 'while constructing a mapping', + node.start_mark, + 'found duplicate key "{}" with value "{}" ' + '(original value: "{}")'.format(key, value, mk), + key_node.start_mark, + """ + To suppress this check see: + http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys + """, + """\ + Duplicate keys will become an error in future releases, and are errors + by default when using the new API. 
+ """, + ] + if self.allow_duplicate_keys is None: + warnings.warn(DuplicateKeyFutureWarning(*args)) + else: + raise DuplicateKeyError(*args) + return False + return True + + def check_set_key(self, node, key_node, setting, key): + # type: (Any, Any, Any, Any, Any) -> None + if key in setting: + if not self.allow_duplicate_keys: + args = [ + 'while constructing a set', + node.start_mark, + 'found duplicate key "{}"'.format(key), + key_node.start_mark, + """ + To suppress this check see: + http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys + """, + """\ + Duplicate keys will become an error in future releases, and are errors + by default when using the new API. + """, + ] + if self.allow_duplicate_keys is None: + warnings.warn(DuplicateKeyFutureWarning(*args)) + else: + raise DuplicateKeyError(*args) + + def construct_pairs(self, node, deep=False): + # type: (Any, bool) -> Any + if not isinstance(node, MappingNode): + raise ConstructorError( + None, + None, + _F('expected a mapping node, but found {node_id!s}', node_id=node.id), + node.start_mark, + ) + pairs = [] + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + value = self.construct_object(value_node, deep=deep) + pairs.append((key, value)) + return pairs + + @classmethod + def add_constructor(cls, tag, constructor): + # type: (Any, Any) -> None + if 'yaml_constructors' not in cls.__dict__: + cls.yaml_constructors = cls.yaml_constructors.copy() + cls.yaml_constructors[tag] = constructor + + @classmethod + def add_multi_constructor(cls, tag_prefix, multi_constructor): + # type: (Any, Any) -> None + if 'yaml_multi_constructors' not in cls.__dict__: + cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy() + cls.yaml_multi_constructors[tag_prefix] = multi_constructor + + +class SafeConstructor(BaseConstructor): + def construct_scalar(self, node): + # type: (Any) -> Any + if isinstance(node, MappingNode): + for key_node, value_node in node.value: + if 
key_node.tag == 'tag:yaml.org,2002:value': + return self.construct_scalar(value_node) + return BaseConstructor.construct_scalar(self, node) + + def flatten_mapping(self, node): + # type: (Any) -> Any + """ + This implements the merge key feature http://yaml.org/type/merge.html + by inserting keys from the merge dict/list of dicts if not yet + available in this node + """ + merge = [] # type: List[Any] + index = 0 + while index < len(node.value): + key_node, value_node = node.value[index] + if key_node.tag == 'tag:yaml.org,2002:merge': + if merge: # double << key + if self.allow_duplicate_keys: + del node.value[index] + index += 1 + continue + args = [ + 'while constructing a mapping', + node.start_mark, + 'found duplicate key "{}"'.format(key_node.value), + key_node.start_mark, + """ + To suppress this check see: + http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys + """, + """\ + Duplicate keys will become an error in future releases, and are errors + by default when using the new API. 
+ """, + ] + if self.allow_duplicate_keys is None: + warnings.warn(DuplicateKeyFutureWarning(*args)) + else: + raise DuplicateKeyError(*args) + del node.value[index] + if isinstance(value_node, MappingNode): + self.flatten_mapping(value_node) + merge.extend(value_node.value) + elif isinstance(value_node, SequenceNode): + submerge = [] + for subnode in value_node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + _F( + 'expected a mapping for merging, but found {subnode_id!s}', + subnode_id=subnode.id, + ), + subnode.start_mark, + ) + self.flatten_mapping(subnode) + submerge.append(subnode.value) + submerge.reverse() + for value in submerge: + merge.extend(value) + else: + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + _F( + 'expected a mapping or list of mappings for merging, ' + 'but found {value_node_id!s}', + value_node_id=value_node.id, + ), + value_node.start_mark, + ) + elif key_node.tag == 'tag:yaml.org,2002:value': + key_node.tag = 'tag:yaml.org,2002:str' + index += 1 + else: + index += 1 + if bool(merge): + node.merge = ( + merge # separate merge keys to be able to update without duplicate + ) + node.value = merge + node.value + + def construct_mapping(self, node, deep=False): + # type: (Any, bool) -> Any + """deep is True when creating an object/mapping recursively, + in that case want the underlying elements available during construction + """ + if isinstance(node, MappingNode): + self.flatten_mapping(node) + return BaseConstructor.construct_mapping(self, node, deep=deep) + + def construct_yaml_null(self, node): + # type: (Any) -> Any + self.construct_scalar(node) + return None + + # YAML 1.2 spec doesn't mention yes/no etc any more, 1.1 does + bool_values = { + 'yes': True, + 'no': False, + 'y': True, + 'n': False, + 'true': True, + 'false': False, + 'on': True, + 'off': False, + } + + def construct_yaml_bool(self, node): + # type: (Any) -> 
bool + value = self.construct_scalar(node) + return self.bool_values[value.lower()] + + def construct_yaml_int(self, node): + # type: (Any) -> int + value_s = self.construct_scalar(node) + value_s = value_s.replace('_', "") + sign = +1 + if value_s[0] == '-': + sign = -1 + if value_s[0] in '+-': + value_s = value_s[1:] + if value_s == '0': + return 0 + elif value_s.startswith('0b'): + return sign * int(value_s[2:], 2) + elif value_s.startswith('0x'): + return sign * int(value_s[2:], 16) + elif value_s.startswith('0o'): + return sign * int(value_s[2:], 8) + elif self.resolver.processing_version == (1, 1) and value_s[0] == '0': + return sign * int(value_s, 8) + elif self.resolver.processing_version == (1, 1) and ':' in value_s: + digits = [int(part) for part in value_s.split(':')] + digits.reverse() + base = 1 + value = 0 + for digit in digits: + value += digit * base + base *= 60 + return sign * value + else: + return sign * int(value_s) + + inf_value = 1e300 + while inf_value != inf_value * inf_value: + inf_value *= inf_value + nan_value = -inf_value / inf_value # Trying to make a quiet NaN (like C99). + + def construct_yaml_float(self, node): + # type: (Any) -> float + value_so = self.construct_scalar(node) + value_s = value_so.replace('_', "").lower() + sign = +1 + if value_s[0] == '-': + sign = -1 + if value_s[0] in '+-': + value_s = value_s[1:] + if value_s == '.inf': + return sign * self.inf_value + elif value_s == '.nan': + return self.nan_value + elif self.resolver.processing_version != (1, 2) and ':' in value_s: + digits = [float(part) for part in value_s.split(':')] + digits.reverse() + base = 1 + value = 0.0 + for digit in digits: + value += digit * base + base *= 60 + return sign * value + else: + if self.resolver.processing_version != (1, 2) and 'e' in value_s: + # value_s is lower case independent of input + mantissa, exponent = value_s.split('e') + if '.' 
not in mantissa: + warnings.warn(MantissaNoDotYAML1_1Warning(node, value_so)) + return sign * float(value_s) + + def construct_yaml_binary(self, node): + # type: (Any) -> Any + try: + value = self.construct_scalar(node).encode('ascii') + except UnicodeEncodeError as exc: + raise ConstructorError( + None, + None, + _F('failed to convert base64 data into ascii: {exc!s}', exc=exc), + node.start_mark, + ) + try: + return base64.decodebytes(value) + except binascii.Error as exc: + raise ConstructorError( + None, + None, + _F('failed to decode base64 data: {exc!s}', exc=exc), + node.start_mark, + ) + + timestamp_regexp = timestamp_regexp # moved to util 0.17.17 + + def construct_yaml_timestamp(self, node, values=None): + # type: (Any, Any) -> Any + if values is None: + try: + match = self.timestamp_regexp.match(node.value) + except TypeError: + match = None + if match is None: + raise ConstructorError( + None, + None, + 'failed to construct timestamp from "{}"'.format(node.value), + node.start_mark, + ) + values = match.groupdict() + return create_timestamp(**values) + + def construct_yaml_omap(self, node): + # type: (Any) -> Any + # Note: we do now check for duplicate keys + omap = ordereddict() + yield omap + if not isinstance(node, SequenceNode): + raise ConstructorError( + 'while constructing an ordered map', + node.start_mark, + _F('expected a sequence, but found {node_id!s}', node_id=node.id), + node.start_mark, + ) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError( + 'while constructing an ordered map', + node.start_mark, + _F( + 'expected a mapping of length 1, but found {subnode_id!s}', + subnode_id=subnode.id, + ), + subnode.start_mark, + ) + if len(subnode.value) != 1: + raise ConstructorError( + 'while constructing an ordered map', + node.start_mark, + _F( + 'expected a single mapping item, but found {len_subnode_val:d} items', + len_subnode_val=len(subnode.value), + ), + subnode.start_mark, + ) + key_node, 
value_node = subnode.value[0] + key = self.construct_object(key_node) + assert key not in omap + value = self.construct_object(value_node) + omap[key] = value + + def construct_yaml_pairs(self, node): + # type: (Any) -> Any + # Note: the same code as `construct_yaml_omap`. + pairs = [] # type: List[Any] + yield pairs + if not isinstance(node, SequenceNode): + raise ConstructorError( + 'while constructing pairs', + node.start_mark, + _F('expected a sequence, but found {node_id!s}', node_id=node.id), + node.start_mark, + ) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError( + 'while constructing pairs', + node.start_mark, + _F( + 'expected a mapping of length 1, but found {subnode_id!s}', + subnode_id=subnode.id, + ), + subnode.start_mark, + ) + if len(subnode.value) != 1: + raise ConstructorError( + 'while constructing pairs', + node.start_mark, + _F( + 'expected a single mapping item, but found {len_subnode_val:d} items', + len_subnode_val=len(subnode.value), + ), + subnode.start_mark, + ) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + pairs.append((key, value)) + + def construct_yaml_set(self, node): + # type: (Any) -> Any + data = set() # type: Set[Any] + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_str(self, node): + # type: (Any) -> Any + value = self.construct_scalar(node) + return value + + def construct_yaml_seq(self, node): + # type: (Any) -> Any + data = self.yaml_base_list_type() # type: List[Any] + yield data + data.extend(self.construct_sequence(node)) + + def construct_yaml_map(self, node): + # type: (Any) -> Any + data = self.yaml_base_dict_type() # type: Dict[Any, Any] + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_object(self, node, cls): + # type: (Any, Any) -> Any + data = cls.__new__(cls) + yield data + if hasattr(data, 
'__setstate__'): + state = self.construct_mapping(node, deep=True) + data.__setstate__(state) + else: + state = self.construct_mapping(node) + data.__dict__.update(state) + + def construct_undefined(self, node): + # type: (Any) -> None + raise ConstructorError( + None, + None, + _F( + 'could not determine a constructor for the tag {node_tag!r}', + node_tag=node.tag, + ), + node.start_mark, + ) + + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:null', SafeConstructor.construct_yaml_null +) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:bool', SafeConstructor.construct_yaml_bool +) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:int', SafeConstructor.construct_yaml_int +) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:float', SafeConstructor.construct_yaml_float +) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:binary', SafeConstructor.construct_yaml_binary +) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:timestamp', SafeConstructor.construct_yaml_timestamp +) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:omap', SafeConstructor.construct_yaml_omap +) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:pairs', SafeConstructor.construct_yaml_pairs +) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:set', SafeConstructor.construct_yaml_set +) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:str', SafeConstructor.construct_yaml_str +) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:seq', SafeConstructor.construct_yaml_seq +) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:map', SafeConstructor.construct_yaml_map +) + +SafeConstructor.add_constructor(None, SafeConstructor.construct_undefined) + + +class Constructor(SafeConstructor): + def construct_python_str(self, node): + # type: (Any) -> Any + return self.construct_scalar(node) + + def construct_python_unicode(self, node): + # type: (Any) -> Any + return self.construct_scalar(node) + + def 
construct_python_bytes(self, node): + # type: (Any) -> Any + try: + value = self.construct_scalar(node).encode('ascii') + except UnicodeEncodeError as exc: + raise ConstructorError( + None, + None, + _F('failed to convert base64 data into ascii: {exc!s}', exc=exc), + node.start_mark, + ) + try: + return base64.decodebytes(value) + except binascii.Error as exc: + raise ConstructorError( + None, + None, + _F('failed to decode base64 data: {exc!s}', exc=exc), + node.start_mark, + ) + + def construct_python_long(self, node): + # type: (Any) -> int + val = self.construct_yaml_int(node) + return val + + def construct_python_complex(self, node): + # type: (Any) -> Any + return complex(self.construct_scalar(node)) + + def construct_python_tuple(self, node): + # type: (Any) -> Any + return tuple(self.construct_sequence(node)) + + def find_python_module(self, name, mark): + # type: (Any, Any) -> Any + if not name: + raise ConstructorError( + 'while constructing a Python module', + mark, + 'expected non-empty name appended to the tag', + mark, + ) + try: + __import__(name) + except ImportError as exc: + raise ConstructorError( + 'while constructing a Python module', + mark, + _F('cannot find module {name!r} ({exc!s})', name=name, exc=exc), + mark, + ) + return sys.modules[name] + + def find_python_name(self, name, mark): + # type: (Any, Any) -> Any + if not name: + raise ConstructorError( + 'while constructing a Python object', + mark, + 'expected non-empty name appended to the tag', + mark, + ) + if '.' 
in name: + lname = name.split('.') + lmodule_name = lname + lobject_name = [] # type: List[Any] + while len(lmodule_name) > 1: + lobject_name.insert(0, lmodule_name.pop()) + module_name = '.'.join(lmodule_name) + try: + __import__(module_name) + # object_name = '.'.join(object_name) + break + except ImportError: + continue + else: + module_name = builtins_module + lobject_name = [name] + try: + __import__(module_name) + except ImportError as exc: + raise ConstructorError( + 'while constructing a Python object', + mark, + _F( + 'cannot find module {module_name!r} ({exc!s})', + module_name=module_name, + exc=exc, + ), + mark, + ) + module = sys.modules[module_name] + object_name = '.'.join(lobject_name) + obj = module + while lobject_name: + if not hasattr(obj, lobject_name[0]): + + raise ConstructorError( + 'while constructing a Python object', + mark, + _F( + 'cannot find {object_name!r} in the module {module_name!r}', + object_name=object_name, + module_name=module.__name__, + ), + mark, + ) + obj = getattr(obj, lobject_name.pop(0)) + return obj + + def construct_python_name(self, suffix, node): + # type: (Any, Any) -> Any + value = self.construct_scalar(node) + if value: + raise ConstructorError( + 'while constructing a Python name', + node.start_mark, + _F('expected the empty value, but found {value!r}', value=value), + node.start_mark, + ) + return self.find_python_name(suffix, node.start_mark) + + def construct_python_module(self, suffix, node): + # type: (Any, Any) -> Any + value = self.construct_scalar(node) + if value: + raise ConstructorError( + 'while constructing a Python module', + node.start_mark, + _F('expected the empty value, but found {value!r}', value=value), + node.start_mark, + ) + return self.find_python_module(suffix, node.start_mark) + + def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False): + # type: (Any, Any, Any, Any, bool) -> Any + if not args: + args = [] + if not kwds: + kwds = {} + cls = 
self.find_python_name(suffix, node.start_mark) + if newobj and isinstance(cls, type): + return cls.__new__(cls, *args, **kwds) + else: + return cls(*args, **kwds) + + def set_python_instance_state(self, instance, state): + # type: (Any, Any) -> None + if hasattr(instance, '__setstate__'): + instance.__setstate__(state) + else: + slotstate = {} # type: Dict[Any, Any] + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if hasattr(instance, '__dict__'): + instance.__dict__.update(state) + elif state: + slotstate.update(state) + for key, value in slotstate.items(): + setattr(instance, key, value) + + def construct_python_object(self, suffix, node): + # type: (Any, Any) -> Any + # Format: + # !!python/object:module.name { ... state ... } + instance = self.make_python_instance(suffix, node, newobj=True) + self.recursive_objects[node] = instance + yield instance + deep = hasattr(instance, '__setstate__') + state = self.construct_mapping(node, deep=deep) + self.set_python_instance_state(instance, state) + + def construct_python_object_apply(self, suffix, node, newobj=False): + # type: (Any, Any, bool) -> Any + # Format: + # !!python/object/apply # (or !!python/object/new) + # args: [ ... arguments ... ] + # kwds: { ... keywords ... } + # state: ... state ... + # listitems: [ ... listitems ... ] + # dictitems: { ... dictitems ... } + # or short format: + # !!python/object/apply [ ... arguments ... ] + # The difference between !!python/object/apply and !!python/object/new + # is how an object is created, check make_python_instance for details. 
+ if isinstance(node, SequenceNode): + args = self.construct_sequence(node, deep=True) + kwds = {} # type: Dict[Any, Any] + state = {} # type: Dict[Any, Any] + listitems = [] # type: List[Any] + dictitems = {} # type: Dict[Any, Any] + else: + value = self.construct_mapping(node, deep=True) + args = value.get('args', []) + kwds = value.get('kwds', {}) + state = value.get('state', {}) + listitems = value.get('listitems', []) + dictitems = value.get('dictitems', {}) + instance = self.make_python_instance(suffix, node, args, kwds, newobj) + if bool(state): + self.set_python_instance_state(instance, state) + if bool(listitems): + instance.extend(listitems) + if bool(dictitems): + for key in dictitems: + instance[key] = dictitems[key] + return instance + + def construct_python_object_new(self, suffix, node): + # type: (Any, Any) -> Any + return self.construct_python_object_apply(suffix, node, newobj=True) + + +Constructor.add_constructor( + 'tag:yaml.org,2002:python/none', Constructor.construct_yaml_null +) + +Constructor.add_constructor( + 'tag:yaml.org,2002:python/bool', Constructor.construct_yaml_bool +) + +Constructor.add_constructor( + 'tag:yaml.org,2002:python/str', Constructor.construct_python_str +) + +Constructor.add_constructor( + 'tag:yaml.org,2002:python/bytes', Constructor.construct_python_bytes +) + +Constructor.add_constructor( + 'tag:yaml.org,2002:python/int', Constructor.construct_yaml_int +) + +Constructor.add_constructor( + 'tag:yaml.org,2002:python/long', Constructor.construct_python_long +) + +Constructor.add_constructor( + 'tag:yaml.org,2002:python/float', Constructor.construct_yaml_float +) + +Constructor.add_constructor( + 'tag:yaml.org,2002:python/complex', Constructor.construct_python_complex +) + +Constructor.add_constructor( + 'tag:yaml.org,2002:python/list', Constructor.construct_yaml_seq +) + +Constructor.add_constructor( + 'tag:yaml.org,2002:python/tuple', Constructor.construct_python_tuple +) + +Constructor.add_constructor( + 
'tag:yaml.org,2002:python/dict', Constructor.construct_yaml_map +) + +Constructor.add_multi_constructor( + 'tag:yaml.org,2002:python/name:', Constructor.construct_python_name +) + +Constructor.add_multi_constructor( + 'tag:yaml.org,2002:python/module:', Constructor.construct_python_module +) + +Constructor.add_multi_constructor( + 'tag:yaml.org,2002:python/object:', Constructor.construct_python_object +) + +Constructor.add_multi_constructor( + 'tag:yaml.org,2002:python/object/apply:', Constructor.construct_python_object_apply +) + +Constructor.add_multi_constructor( + 'tag:yaml.org,2002:python/object/new:', Constructor.construct_python_object_new +) + + +class RoundTripConstructor(SafeConstructor): + """need to store the comments on the node itself, + as well as on the items + """ + + def comment(self, idx): + # type: (Any) -> Any + assert self.loader.comment_handling is not None # type: ignore + x = self.scanner.comments[idx] + x.set_assigned() + return x + + def comments(self, list_of_comments, idx=None): + # type: (Any, Optional[Any]) -> Any + # hand in the comment and optional pre, eol, post segment + if list_of_comments is None: + return [] + if idx is not None: + if list_of_comments[idx] is None: + return [] + list_of_comments = list_of_comments[idx] + for x in list_of_comments: + yield self.comment(x) + + def construct_scalar(self, node): + # type: (Any) -> Any + if not isinstance(node, ScalarNode): + raise ConstructorError( + None, + None, + _F('expected a scalar node, but found {node_id!s}', node_id=node.id), + node.start_mark, + ) + + if node.style == '|' and isinstance(node.value, str): + lss = LiteralScalarString(node.value, anchor=node.anchor) + if self.loader and self.loader.comment_handling is None: + if node.comment and node.comment[1]: + lss.comment = node.comment[1][0] # type: ignore + else: + # NEWCMNT + if node.comment is not None and node.comment[1]: + # nprintf('>>>>nc1', node.comment) + # EOL comment after | + lss.comment = 
self.comment(node.comment[1][0]) # type: ignore + return lss + if node.style == '>' and isinstance(node.value, str): + fold_positions = [] # type: List[int] + idx = -1 + while True: + idx = node.value.find('\a', idx + 1) + if idx < 0: + break + fold_positions.append(idx - len(fold_positions)) + fss = FoldedScalarString(node.value.replace('\a', ''), anchor=node.anchor) + if self.loader and self.loader.comment_handling is None: + if node.comment and node.comment[1]: + fss.comment = node.comment[1][0] # type: ignore + else: + # NEWCMNT + if node.comment is not None and node.comment[1]: + # nprintf('>>>>nc2', node.comment) + # EOL comment after > + lss.comment = self.comment(node.comment[1][0]) # type: ignore + if fold_positions: + fss.fold_pos = fold_positions # type: ignore + return fss + elif bool(self._preserve_quotes) and isinstance(node.value, str): + if node.style == "'": + return SingleQuotedScalarString(node.value, anchor=node.anchor) + if node.style == '"': + return DoubleQuotedScalarString(node.value, anchor=node.anchor) + if node.anchor: + return PlainScalarString(node.value, anchor=node.anchor) + return node.value + + def construct_yaml_int(self, node): + # type: (Any) -> Any + width = None # type: Any + value_su = self.construct_scalar(node) + try: + sx = value_su.rstrip('_') + underscore = [len(sx) - sx.rindex('_') - 1, False, False] # type: Any + except ValueError: + underscore = None + except IndexError: + underscore = None + value_s = value_su.replace('_', "") + sign = +1 + if value_s[0] == '-': + sign = -1 + if value_s[0] in '+-': + value_s = value_s[1:] + if value_s == '0': + return 0 + elif value_s.startswith('0b'): + if self.resolver.processing_version > (1, 1) and value_s[2] == '0': + width = len(value_s[2:]) + if underscore is not None: + underscore[1] = value_su[2] == '_' + underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == '_' + return BinaryInt( + sign * int(value_s[2:], 2), + width=width, + underscore=underscore, + anchor=node.anchor, 
+ ) + elif value_s.startswith('0x'): + # default to lower-case if no a-fA-F in string + if self.resolver.processing_version > (1, 1) and value_s[2] == '0': + width = len(value_s[2:]) + hex_fun = HexInt # type: Any + for ch in value_s[2:]: + if ch in 'ABCDEF': # first non-digit is capital + hex_fun = HexCapsInt + break + if ch in 'abcdef': + break + if underscore is not None: + underscore[1] = value_su[2] == '_' + underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == '_' + return hex_fun( + sign * int(value_s[2:], 16), + width=width, + underscore=underscore, + anchor=node.anchor, + ) + elif value_s.startswith('0o'): + if self.resolver.processing_version > (1, 1) and value_s[2] == '0': + width = len(value_s[2:]) + if underscore is not None: + underscore[1] = value_su[2] == '_' + underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == '_' + return OctalInt( + sign * int(value_s[2:], 8), + width=width, + underscore=underscore, + anchor=node.anchor, + ) + elif self.resolver.processing_version != (1, 2) and value_s[0] == '0': + return sign * int(value_s, 8) + elif self.resolver.processing_version != (1, 2) and ':' in value_s: + digits = [int(part) for part in value_s.split(':')] + digits.reverse() + base = 1 + value = 0 + for digit in digits: + value += digit * base + base *= 60 + return sign * value + elif self.resolver.processing_version > (1, 1) and value_s[0] == '0': + # not an octal, an integer with leading zero(s) + if underscore is not None: + # cannot have a leading underscore + underscore[2] = len(value_su) > 1 and value_su[-1] == '_' + return ScalarInt( + sign * int(value_s), width=len(value_s), underscore=underscore + ) + elif underscore: + # cannot have a leading underscore + underscore[2] = len(value_su) > 1 and value_su[-1] == '_' + return ScalarInt( + sign * int(value_s), + width=None, + underscore=underscore, + anchor=node.anchor, + ) + elif node.anchor: + return ScalarInt(sign * int(value_s), width=None, anchor=node.anchor) + else: + return sign * 
int(value_s) + + def construct_yaml_float(self, node): + # type: (Any) -> Any + def leading_zeros(v): + # type: (Any) -> int + lead0 = 0 + idx = 0 + while idx < len(v) and v[idx] in '0.': + if v[idx] == '0': + lead0 += 1 + idx += 1 + return lead0 + + # underscore = None + m_sign = False # type: Any + value_so = self.construct_scalar(node) + value_s = value_so.replace('_', "").lower() + sign = +1 + if value_s[0] == '-': + sign = -1 + if value_s[0] in '+-': + m_sign = value_s[0] + value_s = value_s[1:] + if value_s == '.inf': + return sign * self.inf_value + if value_s == '.nan': + return self.nan_value + if self.resolver.processing_version != (1, 2) and ':' in value_s: + digits = [float(part) for part in value_s.split(':')] + digits.reverse() + base = 1 + value = 0.0 + for digit in digits: + value += digit * base + base *= 60 + return sign * value + if 'e' in value_s: + try: + mantissa, exponent = value_so.split('e') + exp = 'e' + except ValueError: + mantissa, exponent = value_so.split('E') + exp = 'E' + if self.resolver.processing_version != (1, 2): + # value_s is lower case independent of input + if '.' not in mantissa: + warnings.warn(MantissaNoDotYAML1_1Warning(node, value_so)) + lead0 = leading_zeros(mantissa) + width = len(mantissa) + prec = mantissa.find('.') + if m_sign: + width -= 1 + e_width = len(exponent) + e_sign = exponent[0] in '+-' + # nprint('sf', width, prec, m_sign, exp, e_width, e_sign) + return ScalarFloat( + sign * float(value_s), + width=width, + prec=prec, + m_sign=m_sign, + m_lead0=lead0, + exp=exp, + e_width=e_width, + e_sign=e_sign, + anchor=node.anchor, + ) + width = len(value_so) + prec = value_so.index( + '.' 
+ ) # you can use index, this would not be float without dot + lead0 = leading_zeros(value_so) + return ScalarFloat( + sign * float(value_s), + width=width, + prec=prec, + m_sign=m_sign, + m_lead0=lead0, + anchor=node.anchor, + ) + + def construct_yaml_str(self, node): + # type: (Any) -> Any + value = self.construct_scalar(node) + if isinstance(value, ScalarString): + return value + return value + + def construct_rt_sequence(self, node, seqtyp, deep=False): + # type: (Any, Any, bool) -> Any + if not isinstance(node, SequenceNode): + raise ConstructorError( + None, + None, + _F('expected a sequence node, but found {node_id!s}', node_id=node.id), + node.start_mark, + ) + ret_val = [] + if self.loader and self.loader.comment_handling is None: + if node.comment: + seqtyp._yaml_add_comment(node.comment[:2]) + if len(node.comment) > 2: + # this happens e.g. if you have a sequence element that is a flow-style + # mapping and that has no EOL comment but a following commentline or + # empty line + seqtyp.yaml_end_comment_extend(node.comment[2], clear=True) + else: + # NEWCMNT + if node.comment: + nprintf('nc3', node.comment) + if node.anchor: + from ruyaml.serializer import templated_id + + if not templated_id(node.anchor): + seqtyp.yaml_set_anchor(node.anchor) + for idx, child in enumerate(node.value): + if child.comment: + seqtyp._yaml_add_comment(child.comment, key=idx) + child.comment = None # if moved to sequence remove from child + ret_val.append(self.construct_object(child, deep=deep)) + seqtyp._yaml_set_idx_line_col( + idx, [child.start_mark.line, child.start_mark.column] + ) + return ret_val + + def flatten_mapping(self, node): + # type: (Any) -> Any + """ + This implements the merge key feature http://yaml.org/type/merge.html + by inserting keys from the merge dict/list of dicts if not yet + available in this node + """ + + def constructed(value_node): + # type: (Any) -> Any + # If the contents of a merge are defined within the + # merge marker, then they won't 
have been constructed + # yet. But if they were already constructed, we need to use + # the existing object. + if value_node in self.constructed_objects: + value = self.constructed_objects[value_node] + else: + value = self.construct_object(value_node, deep=False) + return value + + # merge = [] + merge_map_list = [] # type: List[Any] + index = 0 + while index < len(node.value): + key_node, value_node = node.value[index] + if key_node.tag == u'tag:yaml.org,2002:merge': + if merge_map_list and not self.allow_duplicate_keys: + args = [ + 'while constructing a mapping', + node.start_mark, + 'found duplicate key "{}"'.format(key_node.value), + key_node.start_mark, + """ + To suppress this check see: + http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys + """, + """\ + Duplicate keys will become an error in future releases, and are errors + by default when using the new API. + """, + ] + if self.allow_duplicate_keys is None: + warnings.warn(DuplicateKeyFutureWarning(*args)) + else: + raise DuplicateKeyError(*args) + del node.value[index] + if isinstance(value_node, MappingNode): + merge_map_list.append((index, constructed(value_node))) + # self.flatten_mapping(value_node) + # merge.extend(value_node.value) + elif isinstance(value_node, SequenceNode): + # submerge = [] + for subnode in value_node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + _F( + 'expected a mapping for merging, but found {subnode_id!s}', + subnode_id=subnode.id, + ), + subnode.start_mark, + ) + merge_map_list.append((index, constructed(subnode))) + # self.flatten_mapping(subnode) + # submerge.append(subnode.value) + # submerge.reverse() + # for value in submerge: + # merge.extend(value) + else: + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + _F( + 'expected a mapping or list of mappings for merging, ' + 'but found {value_node_id!s}', + value_node_id=value_node.id, + ), + 
value_node.start_mark, + ) + elif key_node.tag == 'tag:yaml.org,2002:value': + key_node.tag = 'tag:yaml.org,2002:str' + index += 1 + else: + index += 1 + return merge_map_list + # if merge: + # node.value = merge + node.value + + def _sentinel(self): + # type: () -> None + pass + + def construct_mapping(self, node, maptyp, deep=False): # type: ignore + # type: (Any, Any, bool) -> Any + if not isinstance(node, MappingNode): + raise ConstructorError( + None, + None, + _F('expected a mapping node, but found {node_id!s}', node_id=node.id), + node.start_mark, + ) + merge_map = self.flatten_mapping(node) + # mapping = {} + if self.loader and self.loader.comment_handling is None: + if node.comment: + maptyp._yaml_add_comment(node.comment[:2]) + if len(node.comment) > 2: + maptyp.yaml_end_comment_extend(node.comment[2], clear=True) + else: + # NEWCMNT + if node.comment: + # nprintf('nc4', node.comment, node.start_mark) + if maptyp.ca.pre is None: + maptyp.ca.pre = [] + for cmnt in self.comments(node.comment, 0): + maptyp.ca.pre.append(cmnt) + if node.anchor: + from ruyaml.serializer import templated_id + + if not templated_id(node.anchor): + maptyp.yaml_set_anchor(node.anchor) + last_key, last_value = None, self._sentinel + for key_node, value_node in node.value: + # keys can be list -> deep + key = self.construct_object(key_node, deep=True) + # lists are not hashable, but tuples are + if not isinstance(key, Hashable): + if isinstance(key, MutableSequence): + key_s = CommentedKeySeq(key) + if key_node.flow_style is True: + key_s.fa.set_flow_style() + elif key_node.flow_style is False: + key_s.fa.set_block_style() + key = key_s + elif isinstance(key, MutableMapping): + key_m = CommentedKeyMap(key) + if key_node.flow_style is True: + key_m.fa.set_flow_style() + elif key_node.flow_style is False: + key_m.fa.set_block_style() + key = key_m + if not isinstance(key, Hashable): + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + 'found unhashable 
key', + key_node.start_mark, + ) + value = self.construct_object(value_node, deep=deep) + if self.check_mapping_key(node, key_node, maptyp, key, value): + if self.loader and self.loader.comment_handling is None: + if ( + key_node.comment + and len(key_node.comment) > 4 + and key_node.comment[4] + ): + if last_value is None: + key_node.comment[0] = key_node.comment.pop(4) + maptyp._yaml_add_comment(key_node.comment, value=last_key) + else: + key_node.comment[2] = key_node.comment.pop(4) + maptyp._yaml_add_comment(key_node.comment, key=key) + key_node.comment = None + if key_node.comment: + maptyp._yaml_add_comment(key_node.comment, key=key) + if value_node.comment: + maptyp._yaml_add_comment(value_node.comment, value=key) + else: + # NEWCMNT + if key_node.comment: + nprintf('nc5a', key, key_node.comment) + if key_node.comment[0]: + maptyp.ca.set(key, C_KEY_PRE, key_node.comment[0]) + if key_node.comment[1]: + maptyp.ca.set(key, C_KEY_EOL, key_node.comment[1]) + if key_node.comment[2]: + maptyp.ca.set(key, C_KEY_POST, key_node.comment[2]) + if value_node.comment: + nprintf('nc5b', key, value_node.comment) + if value_node.comment[0]: + maptyp.ca.set(key, C_VALUE_PRE, value_node.comment[0]) + if value_node.comment[1]: + maptyp.ca.set(key, C_VALUE_EOL, value_node.comment[1]) + if value_node.comment[2]: + maptyp.ca.set(key, C_VALUE_POST, value_node.comment[2]) + maptyp._yaml_set_kv_line_col( + key, + [ + key_node.start_mark.line, + key_node.start_mark.column, + value_node.start_mark.line, + value_node.start_mark.column, + ], + ) + maptyp[key] = value + last_key, last_value = key, value # could use indexing + # do this last, or <<: before a key will prevent insertion in instances + # of collections.OrderedDict (as they have no __contains__ + if merge_map: + maptyp.add_yaml_merge(merge_map) + + def construct_setting(self, node, typ, deep=False): + # type: (Any, Any, bool) -> Any + if not isinstance(node, MappingNode): + raise ConstructorError( + None, + None, + 
_F('expected a mapping node, but found {node_id!s}', node_id=node.id), + node.start_mark, + ) + if self.loader and self.loader.comment_handling is None: + if node.comment: + typ._yaml_add_comment(node.comment[:2]) + if len(node.comment) > 2: + typ.yaml_end_comment_extend(node.comment[2], clear=True) + else: + # NEWCMNT + if node.comment: + nprintf('nc6', node.comment) + if node.anchor: + from ruyaml.serializer import templated_id + + if not templated_id(node.anchor): + typ.yaml_set_anchor(node.anchor) + for key_node, value_node in node.value: + # keys can be list -> deep + key = self.construct_object(key_node, deep=True) + # lists are not hashable, but tuples are + if not isinstance(key, Hashable): + if isinstance(key, list): + key = tuple(key) + if not isinstance(key, Hashable): + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + 'found unhashable key', + key_node.start_mark, + ) + # construct but should be null + value = self.construct_object(value_node, deep=deep) # NOQA + self.check_set_key(node, key_node, typ, key) + if self.loader and self.loader.comment_handling is None: + if key_node.comment: + typ._yaml_add_comment(key_node.comment, key=key) + if value_node.comment: + typ._yaml_add_comment(value_node.comment, value=key) + else: + # NEWCMNT + if key_node.comment: + nprintf('nc7a', key_node.comment) + if value_node.comment: + nprintf('nc7b', value_node.comment) + typ.add(key) + + def construct_yaml_seq(self, node): + # type: (Any) -> Any + data = CommentedSeq() + data._yaml_set_line_col(node.start_mark.line, node.start_mark.column) + # if node.comment: + # data._yaml_add_comment(node.comment) + yield data + data.extend(self.construct_rt_sequence(node, data)) + self.set_collection_style(data, node) + + def construct_yaml_map(self, node): + # type: (Any) -> Any + data = CommentedMap() + data._yaml_set_line_col(node.start_mark.line, node.start_mark.column) + yield data + self.construct_mapping(node, data, deep=True) + 
self.set_collection_style(data, node) + + def set_collection_style(self, data, node): + # type: (Any, Any) -> None + if len(data) == 0: + return + if node.flow_style is True: + data.fa.set_flow_style() + elif node.flow_style is False: + data.fa.set_block_style() + + def construct_yaml_object(self, node, cls): + # type: (Any, Any) -> Any + data = cls.__new__(cls) + yield data + if hasattr(data, '__setstate__'): + state = SafeConstructor.construct_mapping(self, node, deep=True) + data.__setstate__(state) + else: + state = SafeConstructor.construct_mapping(self, node) + if hasattr(data, '__attrs_attrs__'): # issue 394 + data.__init__(**state) + else: + data.__dict__.update(state) + if node.anchor: + from ruyaml.anchor import Anchor + from ruyaml.serializer import templated_id + + if not templated_id(node.anchor): + if not hasattr(data, Anchor.attrib): + a = Anchor() + setattr(data, Anchor.attrib, a) + else: + a = getattr(data, Anchor.attrib) + a.value = node.anchor + + def construct_yaml_omap(self, node): + # type: (Any) -> Any + # Note: we do now check for duplicate keys + omap = CommentedOrderedMap() + omap._yaml_set_line_col(node.start_mark.line, node.start_mark.column) + if node.flow_style is True: + omap.fa.set_flow_style() + elif node.flow_style is False: + omap.fa.set_block_style() + yield omap + if self.loader and self.loader.comment_handling is None: + if node.comment: + omap._yaml_add_comment(node.comment[:2]) + if len(node.comment) > 2: + omap.yaml_end_comment_extend(node.comment[2], clear=True) + else: + # NEWCMNT + if node.comment: + nprintf('nc8', node.comment) + if not isinstance(node, SequenceNode): + raise ConstructorError( + 'while constructing an ordered map', + node.start_mark, + _F('expected a sequence, but found {node_id!s}', node_id=node.id), + node.start_mark, + ) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError( + 'while constructing an ordered map', + node.start_mark, + _F( + 'expected a mapping 
of length 1, but found {subnode_id!s}', + subnode_id=subnode.id, + ), + subnode.start_mark, + ) + if len(subnode.value) != 1: + raise ConstructorError( + 'while constructing an ordered map', + node.start_mark, + _F( + 'expected a single mapping item, but found {len_subnode_val:d} items', + len_subnode_val=len(subnode.value), + ), + subnode.start_mark, + ) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + assert key not in omap + value = self.construct_object(value_node) + if self.loader and self.loader.comment_handling is None: + if key_node.comment: + omap._yaml_add_comment(key_node.comment, key=key) + if subnode.comment: + omap._yaml_add_comment(subnode.comment, key=key) + if value_node.comment: + omap._yaml_add_comment(value_node.comment, value=key) + else: + # NEWCMNT + if key_node.comment: + nprintf('nc9a', key_node.comment) + if subnode.comment: + nprintf('nc9b', subnode.comment) + if value_node.comment: + nprintf('nc9c', value_node.comment) + omap[key] = value + + def construct_yaml_set(self, node): + # type: (Any) -> Any + data = CommentedSet() + data._yaml_set_line_col(node.start_mark.line, node.start_mark.column) + yield data + self.construct_setting(node, data) + + def construct_undefined(self, node): + # type: (Any) -> Any + try: + if isinstance(node, MappingNode): + data = CommentedMap() + data._yaml_set_line_col(node.start_mark.line, node.start_mark.column) + if node.flow_style is True: + data.fa.set_flow_style() + elif node.flow_style is False: + data.fa.set_block_style() + data.yaml_set_tag(node.tag) + yield data + if node.anchor: + from ruyaml.serializer import templated_id + + if not templated_id(node.anchor): + data.yaml_set_anchor(node.anchor) + self.construct_mapping(node, data) + return + elif isinstance(node, ScalarNode): + data2 = TaggedScalar() + data2.value = self.construct_scalar(node) + data2.style = node.style + data2.yaml_set_tag(node.tag) + yield data2 + if node.anchor: + from ruyaml.serializer import 
templated_id + + if not templated_id(node.anchor): + data2.yaml_set_anchor(node.anchor, always_dump=True) + return + elif isinstance(node, SequenceNode): + data3 = CommentedSeq() + data3._yaml_set_line_col(node.start_mark.line, node.start_mark.column) + if node.flow_style is True: + data3.fa.set_flow_style() + elif node.flow_style is False: + data3.fa.set_block_style() + data3.yaml_set_tag(node.tag) + yield data3 + if node.anchor: + from ruyaml.serializer import templated_id + + if not templated_id(node.anchor): + data3.yaml_set_anchor(node.anchor) + data3.extend(self.construct_sequence(node)) + return + except: # NOQA + pass + raise ConstructorError( + None, + None, + _F( + 'could not determine a constructor for the tag {node_tag!r}', + node_tag=node.tag, + ), + node.start_mark, + ) + + def construct_yaml_timestamp(self, node, values=None): + # type: (Any, Any) -> Any + try: + match = self.timestamp_regexp.match(node.value) + except TypeError: + match = None + if match is None: + raise ConstructorError( + None, + None, + 'failed to construct timestamp from "{}"'.format(node.value), + node.start_mark, + ) + values = match.groupdict() + if not values['hour']: + return create_timestamp(**values) + # return SafeConstructor.construct_yaml_timestamp(self, node, values) + for part in ['t', 'tz_sign', 'tz_hour', 'tz_minute']: + if values[part]: + break + else: + return create_timestamp(**values) + # return SafeConstructor.construct_yaml_timestamp(self, node, values) + year = int(values['year']) + month = int(values['month']) + day = int(values['day']) + hour = int(values['hour']) + minute = int(values['minute']) + second = int(values['second']) + fraction = 0 + if values['fraction']: + fraction_s = values['fraction'][:6] + while len(fraction_s) < 6: + fraction_s += '0' + fraction = int(fraction_s) + if len(values['fraction']) > 6 and int(values['fraction'][6]) > 4: + fraction += 1 + delta = None + if values['tz_sign']: + tz_hour = int(values['tz_hour']) + minutes = 
values['tz_minute'] + tz_minute = int(minutes) if minutes else 0 + delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute) + if values['tz_sign'] == '-': + delta = -delta + # should check for None and solve issue 366 should be tzinfo=delta) + if delta: + dt = datetime.datetime(year, month, day, hour, minute) + dt -= delta + data = TimeStamp( + dt.year, dt.month, dt.day, dt.hour, dt.minute, second, fraction + ) + data._yaml['delta'] = delta + tz = values['tz_sign'] + values['tz_hour'] + if values['tz_minute']: + tz += ':' + values['tz_minute'] + data._yaml['tz'] = tz + else: + data = TimeStamp(year, month, day, hour, minute, second, fraction) + if values['tz']: # no delta + data._yaml['tz'] = values['tz'] + + if values['t']: + data._yaml['t'] = True + return data + + def construct_yaml_bool(self, node): + # type: (Any) -> Any + b = SafeConstructor.construct_yaml_bool(self, node) + if node.anchor: + return ScalarBoolean(b, anchor=node.anchor) + return b + + +RoundTripConstructor.add_constructor( + 'tag:yaml.org,2002:null', RoundTripConstructor.construct_yaml_null +) + +RoundTripConstructor.add_constructor( + 'tag:yaml.org,2002:bool', RoundTripConstructor.construct_yaml_bool +) + +RoundTripConstructor.add_constructor( + 'tag:yaml.org,2002:int', RoundTripConstructor.construct_yaml_int +) + +RoundTripConstructor.add_constructor( + 'tag:yaml.org,2002:float', RoundTripConstructor.construct_yaml_float +) + +RoundTripConstructor.add_constructor( + 'tag:yaml.org,2002:binary', RoundTripConstructor.construct_yaml_binary +) + +RoundTripConstructor.add_constructor( + 'tag:yaml.org,2002:timestamp', RoundTripConstructor.construct_yaml_timestamp +) + +RoundTripConstructor.add_constructor( + 'tag:yaml.org,2002:omap', RoundTripConstructor.construct_yaml_omap +) + +RoundTripConstructor.add_constructor( + 'tag:yaml.org,2002:pairs', RoundTripConstructor.construct_yaml_pairs +) + +RoundTripConstructor.add_constructor( + 'tag:yaml.org,2002:set', 
RoundTripConstructor.construct_yaml_set +) + +RoundTripConstructor.add_constructor( + 'tag:yaml.org,2002:str', RoundTripConstructor.construct_yaml_str +) + +RoundTripConstructor.add_constructor( + 'tag:yaml.org,2002:seq', RoundTripConstructor.construct_yaml_seq +) + +RoundTripConstructor.add_constructor( + 'tag:yaml.org,2002:map', RoundTripConstructor.construct_yaml_map +) + +RoundTripConstructor.add_constructor(None, RoundTripConstructor.construct_undefined) diff --git a/lib/ruyaml/cyaml.py b/lib/ruyaml/cyaml.py new file mode 100644 index 0000000..ae3c7e0 --- /dev/null +++ b/lib/ruyaml/cyaml.py @@ -0,0 +1,191 @@ +# coding: utf-8 + +from _ruyaml import CEmitter, CParser # type: ignore + +from ruyaml.constructor import BaseConstructor, Constructor, SafeConstructor +from ruyaml.representer import BaseRepresenter, Representer, SafeRepresenter +from ruyaml.resolver import BaseResolver, Resolver + +if False: # MYPY + from typing import Any, Optional, Union # NOQA + + from ruyaml.compat import StreamTextType, StreamType, VersionType # NOQA + +__all__ = [ + 'CBaseLoader', + 'CSafeLoader', + 'CLoader', + 'CBaseDumper', + 'CSafeDumper', + 'CDumper', +] + + +# this includes some hacks to solve the usage of resolver by lower level +# parts of the parser + + +class CBaseLoader(CParser, BaseConstructor, BaseResolver): # type: ignore + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + CParser.__init__(self, stream) + self._parser = self._composer = self + BaseConstructor.__init__(self, loader=self) + BaseResolver.__init__(self, loadumper=self) + # self.descend_resolver = self._resolver.descend_resolver + # self.ascend_resolver = self._resolver.ascend_resolver + # self.resolve = self._resolver.resolve + + +class CSafeLoader(CParser, SafeConstructor, Resolver): # type: ignore + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], 
Optional[bool]) -> None + CParser.__init__(self, stream) + self._parser = self._composer = self + SafeConstructor.__init__(self, loader=self) + Resolver.__init__(self, loadumper=self) + # self.descend_resolver = self._resolver.descend_resolver + # self.ascend_resolver = self._resolver.ascend_resolver + # self.resolve = self._resolver.resolve + + +class CLoader(CParser, Constructor, Resolver): # type: ignore + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + CParser.__init__(self, stream) + self._parser = self._composer = self + Constructor.__init__(self, loader=self) + Resolver.__init__(self, loadumper=self) + # self.descend_resolver = self._resolver.descend_resolver + # self.ascend_resolver = self._resolver.ascend_resolver + # self.resolve = self._resolver.resolve + + +class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): # type: ignore + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + CEmitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + encoding=encoding, + allow_unicode=allow_unicode, + line_break=line_break, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + ) + self._emitter = self._serializer = self._representer = self + BaseRepresenter.__init__( + self, + default_style=default_style, + default_flow_style=default_flow_style, + dumper=self, + ) + BaseResolver.__init__(self, loadumper=self) + + +class 
CSafeDumper(CEmitter, SafeRepresenter, Resolver): # type: ignore + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + self._emitter = self._serializer = self._representer = self + CEmitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + encoding=encoding, + allow_unicode=allow_unicode, + line_break=line_break, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + ) + self._emitter = self._serializer = self._representer = self + SafeRepresenter.__init__( + self, default_style=default_style, default_flow_style=default_flow_style + ) + Resolver.__init__(self) + + +class CDumper(CEmitter, Representer, Resolver): # type: ignore + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + CEmitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + encoding=encoding, + allow_unicode=allow_unicode, + line_break=line_break, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + ) + self._emitter = 
self._serializer = self._representer = self + Representer.__init__( + self, default_style=default_style, default_flow_style=default_flow_style + ) + Resolver.__init__(self) diff --git a/lib/ruyaml/dumper.py b/lib/ruyaml/dumper.py new file mode 100644 index 0000000..a8a287f --- /dev/null +++ b/lib/ruyaml/dumper.py @@ -0,0 +1,225 @@ +# coding: utf-8 + +from ruyaml.emitter import Emitter +from ruyaml.representer import ( + BaseRepresenter, + Representer, + RoundTripRepresenter, + SafeRepresenter, +) +from ruyaml.resolver import BaseResolver, Resolver, VersionedResolver +from ruyaml.serializer import Serializer + +if False: # MYPY + from typing import Any, Dict, List, Optional, Union # NOQA + + from ruyaml.compat import StreamType, VersionType # NOQA + +__all__ = ['BaseDumper', 'SafeDumper', 'Dumper', 'RoundTripDumper'] + + +class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + sort_keys=False, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (Any, StreamType, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Optional[bool], Any, Any, Any) -> None # NOQA + Emitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + block_seq_indent=block_seq_indent, + dumper=self, + ) + Serializer.__init__( + self, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + dumper=self, + ) + BaseRepresenter.__init__( + self, + default_style=default_style, + default_flow_style=default_flow_style, + dumper=self, + ) + BaseResolver.__init__(self, 
loadumper=self) + + +class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver): + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + sort_keys=False, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Optional[bool], Any, Any, Any) -> None # NOQA + Emitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + block_seq_indent=block_seq_indent, + dumper=self, + ) + Serializer.__init__( + self, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + dumper=self, + ) + SafeRepresenter.__init__( + self, + default_style=default_style, + default_flow_style=default_flow_style, + dumper=self, + ) + Resolver.__init__(self, loadumper=self) + + +class Dumper(Emitter, Serializer, Representer, Resolver): + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + sort_keys=False, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Optional[bool], Any, Any, Any) -> None # NOQA + if sort_keys: + raise NotImplementedError + Emitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + 
block_seq_indent=block_seq_indent, + dumper=self, + ) + Serializer.__init__( + self, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + dumper=self, + ) + Representer.__init__( + self, + default_style=default_style, + default_flow_style=default_flow_style, + dumper=self, + ) + Resolver.__init__(self, loadumper=self) + + +class RoundTripDumper(Emitter, Serializer, RoundTripRepresenter, VersionedResolver): + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Optional[bool], Optional[int], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + Emitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + block_seq_indent=block_seq_indent, + top_level_colon_align=top_level_colon_align, + prefix_colon=prefix_colon, + dumper=self, + ) + Serializer.__init__( + self, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + dumper=self, + ) + RoundTripRepresenter.__init__( + self, + default_style=default_style, + default_flow_style=default_flow_style, + dumper=self, + ) + VersionedResolver.__init__(self, loader=self) diff --git a/lib/ruyaml/emitter.py b/lib/ruyaml/emitter.py new file mode 100644 index 0000000..d5fe1a1 --- /dev/null +++ b/lib/ruyaml/emitter.py @@ -0,0 +1,1797 @@ +# coding: utf-8 + +# Emitter expects events obeying the following grammar: +# stream ::= STREAM-START document* STREAM-END +# document ::= DOCUMENT-START node DOCUMENT-END +# node ::= SCALAR | sequence | 
mapping +# sequence ::= SEQUENCE-START node* SEQUENCE-END +# mapping ::= MAPPING-START (node node)* MAPPING-END + +import sys + +# fmt: off +from ruyaml.compat import ( # NOQA + _F, + DBG_EVENT, + check_anchorname_char, + dbg, + nprint, + nprintf, +) +from ruyaml.error import YAMLError, YAMLStreamError +from ruyaml.events import * # NOQA + +# fmt: on + +if False: # MYPY + from typing import Any, Dict, List, Optional, Text, Tuple, Union # NOQA + + from ruyaml.compat import StreamType # NOQA + +__all__ = ['Emitter', 'EmitterError'] + + +class EmitterError(YAMLError): + pass + + +class ScalarAnalysis: + def __init__( + self, + scalar, + empty, + multiline, + allow_flow_plain, + allow_block_plain, + allow_single_quoted, + allow_double_quoted, + allow_block, + ): + # type: (Any, Any, Any, bool, bool, bool, bool, bool) -> None + self.scalar = scalar + self.empty = empty + self.multiline = multiline + self.allow_flow_plain = allow_flow_plain + self.allow_block_plain = allow_block_plain + self.allow_single_quoted = allow_single_quoted + self.allow_double_quoted = allow_double_quoted + self.allow_block = allow_block + + +class Indents: + # replacement for the list based stack of None/int + def __init__(self): + # type: () -> None + self.values = [] # type: List[Tuple[int, bool]] + + def append(self, val, seq): + # type: (Any, Any) -> None + self.values.append((val, seq)) + + def pop(self): + # type: () -> Any + return self.values.pop()[0] + + def last_seq(self): + # type: () -> bool + # return the seq(uence) value for the element added before the last one + # in increase_indent() + try: + return self.values[-2][1] + except IndexError: + return False + + def seq_flow_align(self, seq_indent, column): + # type: (int, int) -> int + # extra spaces because of dash + if len(self.values) < 2 or not self.values[-1][1]: + return 0 + # -1 for the dash + base = self.values[-1][0] if self.values[-1][0] is not None else 0 + return base + seq_indent - column - 1 + + def __len__(self): + # 
type: () -> int + return len(self.values) + + +class Emitter: + # fmt: off + DEFAULT_TAG_PREFIXES = { + '!': '!', + 'tag:yaml.org,2002:': '!!', + } + # fmt: on + + MAX_SIMPLE_KEY_LENGTH = 128 + + def __init__( + self, + stream, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + brace_single_entry_mapping_in_flow_sequence=None, + dumper=None, + ): + # type: (StreamType, Any, Optional[int], Optional[int], Optional[bool], Any, Optional[int], Optional[bool], Any, Optional[bool], Any) -> None # NOQA + self.dumper = dumper + if self.dumper is not None and getattr(self.dumper, '_emitter', None) is None: + self.dumper._emitter = self + self.stream = stream + + # Encoding can be overriden by STREAM-START. + self.encoding = None # type: Optional[Text] + self.allow_space_break = None + + # Emitter is a state machine with a stack of states to handle nested + # structures. + self.states = [] # type: List[Any] + self.state = self.expect_stream_start # type: Any + + # Current event and the event queue. + self.events = [] # type: List[Any] + self.event = None # type: Any + + # The current indentation level and the stack of previous indents. + self.indents = Indents() + self.indent = None # type: Optional[int] + + # flow_context is an expanding/shrinking list consisting of '{' and '[' + # for each unclosed flow context. If empty list that means block context + self.flow_context = [] # type: List[Text] + + # Contexts. + self.root_context = False + self.sequence_context = False + self.mapping_context = False + self.simple_key_context = False + + # Characteristics of the last emitted character: + # - current position. + # - is it a whitespace? + # - is it an indention character + # (indentation space, '-', '?', or ':')? 
+ self.line = 0 + self.column = 0 + self.whitespace = True + self.indention = True + self.compact_seq_seq = True # dash after dash + self.compact_seq_map = True # key after dash + # self.compact_ms = False # dash after key, only when excplicit key with ? + self.no_newline = None # type: Optional[bool] # set if directly after `- ` + + # Whether the document requires an explicit document end indicator + self.open_ended = False + + # colon handling + self.colon = ':' + self.prefixed_colon = ( + self.colon if prefix_colon is None else prefix_colon + self.colon + ) + # single entry mappings in flow sequence + self.brace_single_entry_mapping_in_flow_sequence = ( + brace_single_entry_mapping_in_flow_sequence # NOQA + ) + + # Formatting details. + self.canonical = canonical + self.allow_unicode = allow_unicode + # set to False to get "\Uxxxxxxxx" for non-basic unicode like emojis + self.unicode_supplementary = sys.maxunicode > 0xFFFF + self.sequence_dash_offset = block_seq_indent if block_seq_indent else 0 + self.top_level_colon_align = top_level_colon_align + self.best_sequence_indent = 2 + self.requested_indent = indent # specific for literal zero indent + if indent and 1 < indent < 10: + self.best_sequence_indent = indent + self.best_map_indent = self.best_sequence_indent + # if self.best_sequence_indent < self.sequence_dash_offset + 1: + # self.best_sequence_indent = self.sequence_dash_offset + 1 + self.best_width = 80 + if width and width > self.best_sequence_indent * 2: + self.best_width = width + self.best_line_break = '\n' # type: Any + if line_break in ['\r', '\n', '\r\n']: + self.best_line_break = line_break + + # Tag prefixes. + self.tag_prefixes = None # type: Any + + # Prepared anchor and tag. + self.prepared_anchor = None # type: Any + self.prepared_tag = None # type: Any + + # Scalar analysis and style. 
+ self.analysis = None # type: Any + self.style = None # type: Any + + self.scalar_after_indicator = True # write a scalar on the same line as `---` + + self.alt_null = 'null' + + @property + def stream(self): + # type: () -> Any + try: + return self._stream + except AttributeError: + raise YAMLStreamError('output stream needs to specified') + + @stream.setter + def stream(self, val): + # type: (Any) -> None + if val is None: + return + if not hasattr(val, 'write'): + raise YAMLStreamError('stream argument needs to have a write() method') + self._stream = val + + @property + def serializer(self): + # type: () -> Any + try: + if hasattr(self.dumper, 'typ'): + return self.dumper.serializer # type: ignore + return self.dumper._serializer # type: ignore + except AttributeError: + return self # cyaml + + @property + def flow_level(self): + # type: () -> int + return len(self.flow_context) + + def dispose(self): + # type: () -> None + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def emit(self, event): + # type: (Any) -> None + if dbg(DBG_EVENT): + nprint(event) + self.events.append(event) + while not self.need_more_events(): + self.event = self.events.pop(0) + self.state() + self.event = None + + # In some cases, we wait for a few next events before emitting. 
+ + def need_more_events(self): + # type: () -> bool + if not self.events: + return True + event = self.events[0] + if isinstance(event, DocumentStartEvent): + return self.need_events(1) + elif isinstance(event, SequenceStartEvent): + return self.need_events(2) + elif isinstance(event, MappingStartEvent): + return self.need_events(3) + else: + return False + + def need_events(self, count): + # type: (int) -> bool + level = 0 + for event in self.events[1:]: + if isinstance(event, (DocumentStartEvent, CollectionStartEvent)): + level += 1 + elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)): + level -= 1 + elif isinstance(event, StreamEndEvent): + level = -1 + if level < 0: + return False + return len(self.events) < count + 1 + + def increase_indent(self, flow=False, sequence=None, indentless=False): + # type: (bool, Optional[bool], bool) -> None + self.indents.append(self.indent, sequence) + if self.indent is None: # top level + if flow: + # self.indent = self.best_sequence_indent if self.indents.last_seq() else \ + # self.best_map_indent + # self.indent = self.best_sequence_indent + self.indent = self.requested_indent + else: + self.indent = 0 + elif not indentless: + self.indent += ( + self.best_sequence_indent + if self.indents.last_seq() + else self.best_map_indent + ) + # if self.indents.last_seq(): + # if self.indent == 0: # top level block sequence + # self.indent = self.best_sequence_indent - self.sequence_dash_offset + # else: + # self.indent += self.best_sequence_indent + # else: + # self.indent += self.best_map_indent + + # States. + + # Stream handlers. 
    def expect_stream_start(self):
        # type: () -> None
        """Initial state: consume the StreamStartEvent, remember its encoding
        (unless the stream already has one) and move to the first-document state."""
        if isinstance(self.event, StreamStartEvent):
            if self.event.encoding and not hasattr(self.stream, 'encoding'):
                self.encoding = self.event.encoding
            self.write_stream_start()
            self.state = self.expect_first_document_start
        else:
            raise EmitterError(
                _F(
                    'expected StreamStartEvent, but got {self_event!s}',
                    self_event=self.event,
                )
            )

    def expect_nothing(self):
        # type: () -> None
        """Terminal state: any further event after StreamEndEvent is an error."""
        raise EmitterError(
            _F('expected nothing, but got {self_event!s}', self_event=self.event)
        )

    # Document handlers.

    def expect_first_document_start(self):
        # type: () -> Any
        """State for the first document of the stream; it may omit the '---' marker."""
        return self.expect_document_start(first=True)

    def expect_document_start(self, first=False):
        # type: (bool) -> None
        """Handle a DocumentStartEvent (emit %YAML/%TAG directives and '---' when
        needed) or a StreamEndEvent (close the stream).

        ``first`` is True only for the first document, which may be implicit
        (no '---') when it has no directives and is not empty.
        """
        if isinstance(self.event, DocumentStartEvent):
            if (self.event.version or self.event.tags) and self.open_ended:
                # directives after an open-ended scalar require '...' first
                self.write_indicator('...', True)
                self.write_indent()
            if self.event.version:
                version_text = self.prepare_version(self.event.version)
                self.write_version_directive(version_text)
            # reset tag handles for every document; %TAG directives are per-document
            self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
            if self.event.tags:
                handles = sorted(self.event.tags.keys())
                for handle in handles:
                    prefix = self.event.tags[handle]
                    self.tag_prefixes[prefix] = handle
                    handle_text = self.prepare_tag_handle(handle)
                    prefix_text = self.prepare_tag_prefix(prefix)
                    self.write_tag_directive(handle_text, prefix_text)
            implicit = (
                first
                and not self.event.explicit
                and not self.canonical
                and not self.event.version
                and not self.event.tags
                and not self.check_empty_document()
            )
            if not implicit:
                self.write_indent()
                self.write_indicator('---', True)
                if self.canonical:
                    self.write_indent()
            self.state = self.expect_document_root
        elif isinstance(self.event, StreamEndEvent):
            if self.open_ended:
                self.write_indicator('...', True)
                self.write_indent()
            self.write_stream_end()
            self.state = self.expect_nothing
        else:
            raise EmitterError(
                _F(
                    'expected DocumentStartEvent, but got {self_event!s}',
                    self_event=self.event,
                )
            )

    def expect_document_end(self):
        # type: () -> None
        """Consume a DocumentEndEvent, writing '...' only for explicit ends,
        then return to the document-start state."""
        if isinstance(self.event, DocumentEndEvent):
            self.write_indent()
            if self.event.explicit:
                self.write_indicator('...', True)
                self.write_indent()
            self.flush_stream()
            self.state = self.expect_document_start
        else:
            raise EmitterError(
                _F(
                    'expected DocumentEndEvent, but got {self_event!s}',
                    self_event=self.event,
                )
            )

    def expect_document_root(self):
        # type: () -> None
        """Emit the document's root node; afterwards expect the document end."""
        self.states.append(self.expect_document_end)
        self.expect_node(root=True)

    # Node handlers.

    def expect_node(self, root=False, sequence=False, mapping=False, simple_key=False):
        # type: (bool, bool, bool, bool) -> None
        """Dispatch on the current event: alias, scalar, sequence or mapping.

        The four flags record the syntactic context of the node and are stored
        on ``self`` for use by the style-choosing and writing helpers.
        Comment events attached to collection starts are written here (before
        the collection opens), which may toggle ``indention``/``no_newline``.
        """
        self.root_context = root
        self.sequence_context = sequence  # not used in PyYAML
        self.mapping_context = mapping
        self.simple_key_context = simple_key
        if isinstance(self.event, AliasEvent):
            self.expect_alias()
        elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
            if (
                self.process_anchor('&')
                and isinstance(self.event, ScalarEvent)
                and self.sequence_context
            ):
                # anchor already forced a separator; scalar no longer starts the item
                self.sequence_context = False
            if (
                root
                and isinstance(self.event, ScalarEvent)
                and not self.scalar_after_indicator
            ):
                self.write_indent()
            self.process_tag()
            if isinstance(self.event, ScalarEvent):
                # nprint('@', self.indention, self.no_newline, self.column)
                self.expect_scalar()
            elif isinstance(self.event, SequenceStartEvent):
                # nprint('@', self.indention, self.no_newline, self.column)
                i2, n2 = self.indention, self.no_newline  # NOQA
                if self.event.comment:
                    if self.event.flow_style is False and self.event.comment:
                        if self.write_post_comment(self.event):
                            self.indention = False
                            self.no_newline = True
                    if self.write_pre_comment(self.event):
                        self.indention = i2
                        self.no_newline = not self.indention
                if (
                    self.flow_level
                    or self.canonical
                    or self.event.flow_style
                    or self.check_empty_sequence()
                ):
                    self.expect_flow_sequence()
                else:
                    self.expect_block_sequence()
            elif isinstance(self.event, MappingStartEvent):
                if self.event.flow_style is False and self.event.comment:
                    self.write_post_comment(self.event)
                if self.event.comment and self.event.comment[1]:
                    self.write_pre_comment(self.event)
                if (
                    self.flow_level
                    or self.canonical
                    or self.event.flow_style
                    or self.check_empty_mapping()
                ):
                    self.expect_flow_mapping(single=self.event.nr_items == 1)
                else:
                    self.expect_block_mapping()
        else:
            raise EmitterError(
                _F('expected NodeEvent, but got {self_event!s}', self_event=self.event)
            )

    def expect_alias(self):
        # type: () -> None
        """Write an '*anchor' alias reference and pop back to the saved state."""
        if self.event.anchor is None:
            raise EmitterError('anchor is not specified for alias')
        self.process_anchor('*')
        self.state = self.states.pop()

    def expect_scalar(self):
        # type: () -> None
        """Write a scalar node; a scalar opens (and closes) its own flow indent."""
        self.increase_indent(flow=True)
        self.process_scalar()
        self.indent = self.indents.pop()
        self.state = self.states.pop()

    # Flow sequence handlers.
+ + def expect_flow_sequence(self): + # type: () -> None + ind = self.indents.seq_flow_align(self.best_sequence_indent, self.column) + self.write_indicator(' ' * ind + '[', True, whitespace=True) + self.increase_indent(flow=True, sequence=True) + self.flow_context.append('[') + self.state = self.expect_first_flow_sequence_item + + def expect_first_flow_sequence_item(self): + # type: () -> None + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + popped = self.flow_context.pop() + assert popped == '[' + self.write_indicator(']', False) + if self.event.comment and self.event.comment[0]: + # eol comment on empty flow sequence + self.write_post_comment(self.event) + elif self.flow_level == 0: + self.write_line_break() + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + def expect_flow_sequence_item(self): + # type: () -> None + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + popped = self.flow_context.pop() + assert popped == '[' + if self.canonical: + self.write_indicator(',', False) + self.write_indent() + self.write_indicator(']', False) + if self.event.comment and self.event.comment[0]: + # eol comment on flow sequence + self.write_post_comment(self.event) + else: + self.no_newline = False + self.state = self.states.pop() + else: + self.write_indicator(',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + # Flow mapping handlers. 
    def expect_flow_mapping(self, single=False):
        # type: (bool) -> None
        """Open a flow mapping. A single-entry mapping directly inside a flow
        sequence may omit the braces entirely (then '' is pushed as context)."""
        ind = self.indents.seq_flow_align(self.best_sequence_indent, self.column)
        map_init = '{'
        if (
            single
            and self.flow_level
            and self.flow_context[-1] == '['
            and not self.canonical
            and not self.brace_single_entry_mapping_in_flow_sequence
        ):
            # single map item with flow context, no curly braces necessary
            map_init = ''
        self.write_indicator(' ' * ind + map_init, True, whitespace=True)
        self.flow_context.append(map_init)
        self.increase_indent(flow=True, sequence=False)
        self.state = self.expect_first_flow_mapping_key

    def expect_first_flow_mapping_key(self):
        # type: () -> None
        """First key of a flow mapping; a MappingEndEvent here means '{}'."""
        if isinstance(self.event, MappingEndEvent):
            self.indent = self.indents.pop()
            popped = self.flow_context.pop()
            assert popped == '{'  # empty flow mapping
            self.write_indicator('}', False)
            if self.event.comment and self.event.comment[0]:
                # eol comment on empty mapping
                self.write_post_comment(self.event)
            elif self.flow_level == 0:
                self.write_line_break()
            self.state = self.states.pop()
        else:
            if self.canonical or self.column > self.best_width:
                self.write_indent()
            if not self.canonical and self.check_simple_key():
                self.states.append(self.expect_flow_mapping_simple_value)
                self.expect_node(mapping=True, simple_key=True)
            else:
                # complex key: needs an explicit '?' indicator
                self.write_indicator('?', True)
                self.states.append(self.expect_flow_mapping_value)
                self.expect_node(mapping=True)

    def expect_flow_mapping_key(self):
        # type: () -> None
        """Subsequent keys: close with '}' (unless braces were omitted) or write
        ',' and emit the next key node."""
        if isinstance(self.event, MappingEndEvent):
            # if self.event.comment and self.event.comment[1]:
            #     self.write_pre_comment(self.event)
            self.indent = self.indents.pop()
            popped = self.flow_context.pop()
            assert popped in ['{', '']
            if self.canonical:
                self.write_indicator(',', False)
                self.write_indent()
            if popped != '':
                self.write_indicator('}', False)
            if self.event.comment and self.event.comment[0]:
                # eol comment on flow mapping, never reached on empty mappings
                self.write_post_comment(self.event)
            else:
                self.no_newline = False
            self.state = self.states.pop()
        else:
            self.write_indicator(',', False)
            if self.canonical or self.column > self.best_width:
                self.write_indent()
            if not self.canonical and self.check_simple_key():
                self.states.append(self.expect_flow_mapping_simple_value)
                self.expect_node(mapping=True, simple_key=True)
            else:
                self.write_indicator('?', True)
                self.states.append(self.expect_flow_mapping_value)
                self.expect_node(mapping=True)

    def expect_flow_mapping_simple_value(self):
        # type: () -> None
        """Write ':' after a simple key and emit the value node."""
        self.write_indicator(self.prefixed_colon, False)
        self.states.append(self.expect_flow_mapping_key)
        self.expect_node(mapping=True)

    def expect_flow_mapping_value(self):
        # type: () -> None
        """Write ':' (after an explicit '?' key) and emit the value node."""
        if self.canonical or self.column > self.best_width:
            self.write_indent()
        self.write_indicator(self.prefixed_colon, True)
        self.states.append(self.expect_flow_mapping_key)
        self.expect_node(mapping=True)

    # Block sequence handlers.

    def expect_block_sequence(self):
        # type: () -> None
        """Open a block sequence; inside a mapping the '-' items may be
        'indentless' (aligned with the key) depending on current indention."""
        if self.mapping_context:
            indentless = not self.indention
        else:
            indentless = False
            if not self.compact_seq_seq and self.column != 0:
                self.write_line_break()
        self.increase_indent(flow=False, sequence=True, indentless=indentless)
        self.state = self.expect_first_block_sequence_item

    def expect_first_block_sequence_item(self):
        # type: () -> Any
        return self.expect_block_sequence_item(first=True)

    def expect_block_sequence_item(self, first=False):
        # type: (bool) -> None
        """Emit one '- item' line of a block sequence, or finish the sequence
        on SequenceEndEvent (flushing any trailing pre-comments)."""
        if not first and isinstance(self.event, SequenceEndEvent):
            if self.event.comment and self.event.comment[1]:
                # final comments on a block list e.g. empty line
                self.write_pre_comment(self.event)
            self.indent = self.indents.pop()
            self.state = self.states.pop()
            self.no_newline = False
        else:
            if self.event.comment and self.event.comment[1]:
                self.write_pre_comment(self.event)
            nonl = self.no_newline if self.column == 0 else False
            self.write_indent()
            ind = self.sequence_dash_offset  # if len(self.indents) > 1 else 0
            self.write_indicator(' ' * ind + '-', True, indention=True)
            if nonl or self.sequence_dash_offset + 2 > self.best_sequence_indent:
                self.no_newline = True
            self.states.append(self.expect_block_sequence_item)
            self.expect_node(sequence=True)

    # Block mapping handlers.

    def expect_block_mapping(self):
        # type: () -> None
        """Open a block mapping, breaking the line first unless a compact
        'seq item: value' form is allowed."""
        if not self.mapping_context and not (self.compact_seq_map or self.column == 0):
            self.write_line_break()
        self.increase_indent(flow=False, sequence=False)
        self.state = self.expect_first_block_mapping_key

    def expect_first_block_mapping_key(self):
        # type: () -> Any
        return self.expect_block_mapping_key(first=True)

    def expect_block_mapping_key(self, first=False):
        # type: (Any) -> None
        """Emit a block-mapping key (simple 'key:' or explicit '? key'), or
        finish the mapping on MappingEndEvent."""
        if not first and isinstance(self.event, MappingEndEvent):
            if self.event.comment and self.event.comment[1]:
                # final comments from a doc
                self.write_pre_comment(self.event)
            self.indent = self.indents.pop()
            self.state = self.states.pop()
        else:
            if self.event.comment and self.event.comment[1]:
                # final comments from a doc
                self.write_pre_comment(self.event)
            self.write_indent()
            if self.check_simple_key():
                if not isinstance(
                    self.event, (SequenceStartEvent, MappingStartEvent)
                ):  # sequence keys
                    try:
                        # a requested '?' style forces the explicit key form
                        if self.event.style == '?':
                            self.write_indicator('?', True, indention=True)
                    except AttributeError:  # aliases have no style
                        pass
                self.states.append(self.expect_block_mapping_simple_value)
                self.expect_node(mapping=True, simple_key=True)
                # test on style for alias in !!set
                if isinstance(self.event, AliasEvent) and not self.event.style == '?':
                    self.stream.write(' ')
            else:
                self.write_indicator('?', True, indention=True)
                self.states.append(self.expect_block_mapping_value)
                self.expect_node(mapping=True)

    def expect_block_mapping_simple_value(self):
        # type: () -> None
        """Write the ':' after a simple key (optionally column-aligned at top
        level) and emit the value node."""
        if getattr(self.event, 'style', None) != '?':
            # prefix = ''
            if self.indent == 0 and self.top_level_colon_align is not None:
                # write non-prefixed colon
                c = ' ' * (self.top_level_colon_align - self.column) + self.colon
            else:
                c = self.prefixed_colon
            self.write_indicator(c, False)
        self.states.append(self.expect_block_mapping_key)
        self.expect_node(mapping=True)

    def expect_block_mapping_value(self):
        # type: () -> None
        """Write ':' on its own indented line (explicit-key form) and emit the value."""
        self.write_indent()
        self.write_indicator(self.prefixed_colon, True, indention=True)
        self.states.append(self.expect_block_mapping_key)
        self.expect_node(mapping=True)

    # Checkers.

    def check_empty_sequence(self):
        # type: () -> bool
        """True when the current SequenceStartEvent is immediately followed by its end."""
        return (
            isinstance(self.event, SequenceStartEvent)
            and bool(self.events)
            and isinstance(self.events[0], SequenceEndEvent)
        )

    def check_empty_mapping(self):
        # type: () -> bool
        """True when the current MappingStartEvent is immediately followed by its end."""
        return (
            isinstance(self.event, MappingStartEvent)
            and bool(self.events)
            and isinstance(self.events[0], MappingEndEvent)
        )

    def check_empty_document(self):
        # type: () -> bool
        """True when the upcoming document consists of a single empty implicit scalar."""
        if not isinstance(self.event, DocumentStartEvent) or not self.events:
            return False
        event = self.events[0]
        return (
            isinstance(event, ScalarEvent)
            and event.anchor is None
            and event.tag is None
            and event.implicit
            and event.value == ""
        )

    def check_simple_key(self):
        # type: () -> bool
        """Decide whether the current node can be written as a simple (inline)
        key: short enough (anchor + tag + scalar length) and of a suitable kind.
        Caches prepared anchor/tag and the scalar analysis as a side effect."""
        length = 0
        if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
            if self.prepared_anchor is None:
                self.prepared_anchor = self.prepare_anchor(self.event.anchor)
            length += len(self.prepared_anchor)
        if (
            isinstance(self.event, (ScalarEvent, CollectionStartEvent))
            and self.event.tag is not None
        ):
            if self.prepared_tag is None:
                self.prepared_tag = self.prepare_tag(self.event.tag)
            length += len(self.prepared_tag)
        if isinstance(self.event, ScalarEvent):
            if self.analysis is None:
                self.analysis = self.analyze_scalar(self.event.value)
            length += len(self.analysis.scalar)
        return length < self.MAX_SIMPLE_KEY_LENGTH and (
            isinstance(self.event, AliasEvent)
            or (
                isinstance(self.event, SequenceStartEvent)
                and self.event.flow_style is True
            )
            or (
                isinstance(self.event, MappingStartEvent)
                and self.event.flow_style is True
            )
            or (
                isinstance(self.event, ScalarEvent)
                # if there is an explicit style for an empty string, it is a simple key
                and not (self.analysis.empty and self.style and self.style not in '\'"')
                and not self.analysis.multiline
            )
            or self.check_empty_sequence()
            or self.check_empty_mapping()
        )

    # Anchor, Tag, and Scalar processors.

    def process_anchor(self, indicator):
        # type: (Any) -> bool
        """Write '&anchor' or '*anchor' (``indicator`` selects which); return
        True when the event carried an anchor at all."""
        if self.event.anchor is None:
            self.prepared_anchor = None
            return False
        if self.prepared_anchor is None:
            self.prepared_anchor = self.prepare_anchor(self.event.anchor)
        if self.prepared_anchor:
            self.write_indicator(indicator + self.prepared_anchor, True)
            # issue 288
            self.no_newline = False
        self.prepared_anchor = None
        return True

    def process_tag(self):
        # type: () -> None
        """Write the node's tag if it must appear in the output; for scalars
        this also fixes the scalar style (and may substitute ``alt_null``)."""
        tag = self.event.tag
        if isinstance(self.event, ScalarEvent):
            if self.style is None:
                self.style = self.choose_scalar_style()
            if (
                self.event.value == ''
                and self.style == "'"
                and tag == 'tag:yaml.org,2002:null'
                and self.alt_null is not None
            ):
                # replace a single-quoted empty null by the configured alternative
                self.event.value = self.alt_null
                self.analysis = None
                self.style = self.choose_scalar_style()
            if (not self.canonical or tag is None) and (
                (self.style == "" and self.event.implicit[0])
                or (self.style != "" and self.event.implicit[1])
            ):
                self.prepared_tag = None
                return
            if self.event.implicit[0] and tag is None:
                tag = '!'
                self.prepared_tag = None
        else:
            if (not self.canonical or tag is None) and self.event.implicit:
                self.prepared_tag = None
                return
        if tag is None:
            raise EmitterError('tag is not specified')
        if self.prepared_tag is None:
            self.prepared_tag = self.prepare_tag(tag)
        if self.prepared_tag:
            self.write_indicator(self.prepared_tag, True)
            if (
                self.sequence_context
                and not self.flow_level
                and isinstance(self.event, ScalarEvent)
            ):
                self.no_newline = True
        self.prepared_tag = None

    def choose_scalar_style(self):
        # type: () -> Any
        """Pick the output style for the current scalar ('' plain, "'", '"',
        '|' or '>') honoring the requested style, the context and what
        ``analyze_scalar`` says is representable; '"' is the fallback."""
        if self.analysis is None:
            self.analysis = self.analyze_scalar(self.event.value)
        if self.event.style == '"' or self.canonical:
            return '"'
        if (not self.event.style or self.event.style == '?') and (
            self.event.implicit[0] or not self.event.implicit[2]
        ):
            if not (
                self.simple_key_context
                and (self.analysis.empty or self.analysis.multiline)
            ) and (
                self.flow_level
                and self.analysis.allow_flow_plain
                or (not self.flow_level and self.analysis.allow_block_plain)
            ):
                return ""
            self.analysis.allow_block = True
        if self.event.style and self.event.style in '|>':
            if (
                not self.flow_level
                and not self.simple_key_context
                and self.analysis.allow_block
            ):
                return self.event.style
        if not self.event.style and self.analysis.allow_double_quoted:
            if "'" in self.event.value or '\n' in self.event.value:
                return '"'
        if not self.event.style or self.event.style == "'":
            if self.analysis.allow_single_quoted and not (
                self.simple_key_context and self.analysis.multiline
            ):
                return "'"
        return '"'

    def process_scalar(self):
        # type: () -> None
        """Write the scalar in the chosen style, then reset the cached
        analysis/style and flush any post comment."""
        if self.analysis is None:
            self.analysis = self.analyze_scalar(self.event.value)
        if self.style is None:
            self.style = self.choose_scalar_style()
        split = not self.simple_key_context
        # if self.analysis.multiline and split \
        #         and (not self.style or self.style in '\'\"'):
        #     self.write_indent()
        # nprint('xx', self.sequence_context, self.flow_level)
        if self.sequence_context and not self.flow_level:
            self.write_indent()
        if self.style == '"':
            self.write_double_quoted(self.analysis.scalar, split)
        elif self.style == "'":
            self.write_single_quoted(self.analysis.scalar, split)
        elif self.style == '>':
            self.write_folded(self.analysis.scalar)
            if (
                self.event.comment
                and self.indent is not None
                and self.event.comment[0]
                and self.event.comment[0].column >= self.indent
            ):
                # comment following a folded scalar must dedent (issue 376)
                self.event.comment[0].column = self.indent - 1  # type: ignore
        elif self.style == '|':
            # self.write_literal(self.analysis.scalar, self.event.comment)
            try:
                cmx = self.event.comment[1][0]
            except (IndexError, TypeError):
                cmx = ""
            self.write_literal(self.analysis.scalar, cmx)
            if (
                self.event.comment
                and self.indent is not None
                and self.event.comment[0]
                and self.event.comment[0].column >= self.indent
            ):
                # comment following a literal scalar must dedent (issue 376)
                self.event.comment[0].column = self.indent - 1  # type: ignore
        else:
            self.write_plain(self.analysis.scalar, split)
        self.analysis = None
        self.style = None
        if self.event.comment:
            self.write_post_comment(self.event)

    # Analyzers.

    def prepare_version(self, version):
        # type: (Any) -> Any
        """Format a (major, minor) tuple as 'major.minor'; only YAML 1.x is supported."""
        major, minor = version
        if major != 1:
            raise EmitterError(
                _F(
                    'unsupported YAML version: {major:d}.{minor:d}',
                    major=major,
                    minor=minor,
                )
            )
        return _F('{major:d}.{minor:d}', major=major, minor=minor)

    def prepare_tag_handle(self, handle):
        # type: (Any) -> Any
        """Validate a tag handle: '!...!' with only alnum/'-'/'_' in between."""
        if not handle:
            raise EmitterError('tag handle must not be empty')
        if handle[0] != '!' or handle[-1] != '!':
            raise EmitterError(
                _F("tag handle must start and end with '!': {handle!r}", handle=handle)
            )
        for ch in handle[1:-1]:
            if not (
                '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' or ch in '-_'
            ):
                raise EmitterError(
                    _F(
                        'invalid character {ch!r} in the tag handle: {handle!r}',
                        ch=ch,
                        handle=handle,
                    )
                )
        return handle

    def prepare_tag_prefix(self, prefix):
        # type: (Any) -> Any
        """Percent-escape characters of a tag prefix that are not URI-safe;
        '#' is allowed unescaped for YAML >= 1.2."""
        if not prefix:
            raise EmitterError('tag prefix must not be empty')
        chunks = []  # type: List[Any]
        start = end = 0
        if prefix[0] == '!':
            end = 1
        ch_set = "-;/?:@&=+$,_.~*'()[]"
        if self.dumper:
            version = getattr(self.dumper, 'version', (1, 2))
            if version is None or version >= (1, 2):
                ch_set += '#'
        while end < len(prefix):
            ch = prefix[end]
            if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' or ch in ch_set:
                end += 1
            else:
                if start < end:
                    chunks.append(prefix[start:end])
                start = end = end + 1
                data = ch
                for ch in data:
                    chunks.append(_F('%{ord_ch:02X}', ord_ch=ord(ch)))
        if start < end:
            chunks.append(prefix[start:end])
        return "".join(chunks)

    def prepare_tag(self, tag):
        # type: (Any) -> Any
        """Render a full tag for output: shorten it via a registered handle if
        one matches, percent-escaping the suffix; otherwise use '!<...>'."""
        if not tag:
            raise EmitterError('tag must not be empty')
        if tag == '!':
            return tag
        handle = None
        suffix = tag
        prefixes = sorted(self.tag_prefixes.keys())
        for prefix in prefixes:
            if tag.startswith(prefix) and (prefix == '!' or len(prefix) < len(tag)):
                handle = self.tag_prefixes[prefix]
                suffix = tag[len(prefix) :]
        chunks = []  # type: List[Any]
        start = end = 0
        ch_set = "-;/?:@&=+$,_.~*'()[]"
        if self.dumper:
            version = getattr(self.dumper, 'version', (1, 2))
            if version is None or version >= (1, 2):
                ch_set += '#'
        while end < len(suffix):
            ch = suffix[end]
            if (
                '0' <= ch <= '9'
                or 'A' <= ch <= 'Z'
                or 'a' <= ch <= 'z'
                or ch in ch_set
                or (ch == '!' and handle != '!')
            ):
                end += 1
            else:
                if start < end:
                    chunks.append(suffix[start:end])
                start = end = end + 1
                data = ch
                for ch in data:
                    chunks.append(_F('%{ord_ch:02X}', ord_ch=ord(ch)))
        if start < end:
            chunks.append(suffix[start:end])
        suffix_text = "".join(chunks)
        if handle:
            return _F(
                '{handle!s}{suffix_text!s}', handle=handle, suffix_text=suffix_text
            )
        else:
            return _F('!<{suffix_text!s}>', suffix_text=suffix_text)

    def prepare_anchor(self, anchor):
        # type: (Any) -> Any
        """Validate an anchor name character by character."""
        if not anchor:
            raise EmitterError('anchor must not be empty')
        for ch in anchor:
            if not check_anchorname_char(ch):
                raise EmitterError(
                    _F(
                        'invalid character {ch!r} in the anchor: {anchor!r}',
                        ch=ch,
                        anchor=anchor,
                    )
                )
        return anchor

    def analyze_scalar(self, scalar):
        # type: (Any) -> Any
        """Single pass over the scalar text collecting the facts needed to
        decide which output styles are legal (indicators, breaks, special
        characters, whitespace combinations); returns a ScalarAnalysis."""
        # Empty scalar is a special case.
        if not scalar:
            return ScalarAnalysis(
                scalar=scalar,
                empty=True,
                multiline=False,
                allow_flow_plain=False,
                allow_block_plain=True,
                allow_single_quoted=True,
                allow_double_quoted=True,
                allow_block=False,
            )

        # Indicators and special characters.
        block_indicators = False
        flow_indicators = False
        line_breaks = False
        special_characters = False

        # Important whitespace combinations.
        leading_space = False
        leading_break = False
        trailing_space = False
        trailing_break = False
        break_space = False
        space_break = False

        # Check document indicators.
        if scalar.startswith('---') or scalar.startswith('...'):
            block_indicators = True
            flow_indicators = True

        # First character or preceded by a whitespace.
        # NOTE(review): 'preceeded' is a misspelling of 'preceded' (local name only).
        preceeded_by_whitespace = True

        # Last character or followed by a whitespace.
        followed_by_whitespace = (
            len(scalar) == 1 or scalar[1] in '\0 \t\r\n\x85\u2028\u2029'
        )

        # The previous character is a space.
        previous_space = False

        # The previous character is a break.
        previous_break = False

        index = 0
        while index < len(scalar):
            ch = scalar[index]

            # Check for indicators.
            if index == 0:
                # Leading indicators are special characters.
                if ch in '#,[]{}&*!|>\'"%@`':
                    flow_indicators = True
                    block_indicators = True
                if ch in '?:':  # ToDo
                    if self.serializer.use_version == (1, 1):
                        flow_indicators = True
                    elif len(scalar) == 1:  # single character
                        flow_indicators = True
                    if followed_by_whitespace:
                        block_indicators = True
                if ch == '-' and followed_by_whitespace:
                    flow_indicators = True
                    block_indicators = True
            else:
                # Some indicators cannot appear within a scalar as well.
                if ch in ',[]{}':  # http://yaml.org/spec/1.2/spec.html#id2788859
                    flow_indicators = True
                if ch == '?' and self.serializer.use_version == (1, 1):
                    flow_indicators = True
                if ch == ':':
                    if followed_by_whitespace:
                        flow_indicators = True
                        block_indicators = True
                if ch == '#' and preceeded_by_whitespace:
                    flow_indicators = True
                    block_indicators = True

            # Check for line breaks, special, and unicode characters.
            if ch in '\n\x85\u2028\u2029':
                line_breaks = True
            if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
                if (
                    ch == '\x85'
                    or '\xA0' <= ch <= '\uD7FF'
                    or '\uE000' <= ch <= '\uFFFD'
                    or (
                        self.unicode_supplementary
                        and ('\U00010000' <= ch <= '\U0010FFFF')
                    )
                ) and ch != '\uFEFF':
                    # unicode_characters = True
                    if not self.allow_unicode:
                        special_characters = True
                else:
                    special_characters = True

            # Detect important whitespace combinations.
            if ch == ' ':
                if index == 0:
                    leading_space = True
                if index == len(scalar) - 1:
                    trailing_space = True
                if previous_break:
                    break_space = True
                previous_space = True
                previous_break = False
            elif ch in '\n\x85\u2028\u2029':
                if index == 0:
                    leading_break = True
                if index == len(scalar) - 1:
                    trailing_break = True
                if previous_space:
                    space_break = True
                previous_space = False
                previous_break = True
            else:
                previous_space = False
                previous_break = False

            # Prepare for the next character.
            index += 1
            preceeded_by_whitespace = ch in '\0 \t\r\n\x85\u2028\u2029'
            followed_by_whitespace = (
                index + 1 >= len(scalar)
                or scalar[index + 1] in '\0 \t\r\n\x85\u2028\u2029'
            )

        # Let's decide what styles are allowed.
        allow_flow_plain = True
        allow_block_plain = True
        allow_single_quoted = True
        allow_double_quoted = True
        allow_block = True

        # Leading and trailing whitespaces are bad for plain scalars.
        if leading_space or leading_break or trailing_space or trailing_break:
            allow_flow_plain = allow_block_plain = False

        # We do not permit trailing spaces for block scalars.
        if trailing_space:
            allow_block = False

        # Spaces at the beginning of a new line are only acceptable for block
        # scalars.
        if break_space:
            allow_flow_plain = allow_block_plain = allow_single_quoted = False

        # Spaces followed by breaks, as well as special character are only
        # allowed for double quoted scalars.
        if special_characters:
            allow_flow_plain = (
                allow_block_plain
            ) = allow_single_quoted = allow_block = False
        elif space_break:
            allow_flow_plain = allow_block_plain = allow_single_quoted = False
            if not self.allow_space_break:
                allow_block = False

        # Although the plain scalar writer supports breaks, we never emit
        # multiline plain scalars.
        if line_breaks:
            allow_flow_plain = allow_block_plain = False

        # Flow indicators are forbidden for flow plain scalars.
        if flow_indicators:
            allow_flow_plain = False

        # Block indicators are forbidden for block plain scalars.
        if block_indicators:
            allow_block_plain = False

        return ScalarAnalysis(
            scalar=scalar,
            empty=False,
            multiline=line_breaks,
            allow_flow_plain=allow_flow_plain,
            allow_block_plain=allow_block_plain,
            allow_single_quoted=allow_single_quoted,
            allow_double_quoted=allow_double_quoted,
            allow_block=allow_block,
        )

    # Writers.

    def flush_stream(self):
        # type: () -> None
        """Flush the underlying stream if it supports flushing."""
        if hasattr(self.stream, 'flush'):
            self.stream.flush()

    def write_stream_start(self):
        # type: () -> None
        # Write BOM if needed.
        if self.encoding and self.encoding.startswith('utf-16'):
            self.stream.write('\uFEFF'.encode(self.encoding))

    def write_stream_end(self):
        # type: () -> None
        self.flush_stream()

    def write_indicator(
        self, indicator, need_whitespace, whitespace=False, indention=False
    ):
        # type: (Any, Any, bool, bool) -> None
        """Write a syntax indicator, inserting a separating space when needed,
        and update the column/whitespace/indention bookkeeping."""
        if self.whitespace or not need_whitespace:
            data = indicator
        else:
            data = ' ' + indicator
        self.whitespace = whitespace
        self.indention = self.indention and indention
        self.column += len(data)
        self.open_ended = False
        if bool(self.encoding):
            data = data.encode(self.encoding)
        self.stream.write(data)

    def write_indent(self):
        # type: () -> None
        """Break the line (unless suppressed by ``no_newline``) and pad with
        spaces up to the current indent level."""
        indent = self.indent or 0
        if (
            not self.indention
            or self.column > indent
            or (self.column == indent and not self.whitespace)
        ):
            if bool(self.no_newline):
                self.no_newline = False
            else:
                self.write_line_break()
        if self.column < indent:
            self.whitespace = True
            data = ' ' * (indent - self.column)
            self.column = indent
            if self.encoding:
                data = data.encode(self.encoding)  # type: ignore
            self.stream.write(data)

    def write_line_break(self, data=None):
        # type: (Any) -> None
        """Write a line break (default: ``best_line_break``) and reset column state."""
        if data is None:
            data = self.best_line_break
        self.whitespace = True
        self.indention = True
        self.line += 1
        self.column = 0
        if bool(self.encoding):
            data = data.encode(self.encoding)
        self.stream.write(data)

    def write_version_directive(self, version_text):
        # type: (Any) -> None
        """Write a '%YAML <version>' directive line."""
        data = _F('%YAML {version_text!s}', version_text=version_text)
        if self.encoding:
            data = data.encode(self.encoding)  # type: ignore
        self.stream.write(data)
        self.write_line_break()

    def write_tag_directive(self, handle_text, prefix_text):
        # type: (Any, Any) -> None
        """Write a '%TAG <handle> <prefix>' directive line."""
        data = _F(
            '%TAG {handle_text!s} {prefix_text!s}',
            handle_text=handle_text,
            prefix_text=prefix_text,
        )
        if self.encoding:
            data = data.encode(self.encoding)  # type: ignore
        self.stream.write(data)
        self.write_line_break()

    # Scalar streams.

    def write_single_quoted(self, text, split=True):
        # type: (Any, Any) -> None
        """Write ``text`` as a single-quoted scalar, doubling inner quotes and
        folding at spaces when ``split`` is true and the line is too long."""
        if self.root_context:
            if self.requested_indent is not None:
                self.write_line_break()
                if self.requested_indent != 0:
                    self.write_indent()
        self.write_indicator("'", True)
        spaces = False
        breaks = False
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if spaces:
                if ch is None or ch != ' ':
                    if (
                        start + 1 == end
                        and self.column > self.best_width
                        and split
                        and start != 0
                        and end != len(text)
                    ):
                        self.write_indent()
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if bool(self.encoding):
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    start = end
            elif breaks:
                if ch is None or ch not in '\n\x85\u2028\u2029':
                    if text[start] == '\n':
                        self.write_line_break()
                    for br in text[start:end]:
                        if br == '\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    self.write_indent()
                    start = end
            else:
                if ch is None or ch in ' \n\x85\u2028\u2029' or ch == "'":
                    if start < end:
                        data = text[start:end]
                        self.column += len(data)
                        if bool(self.encoding):
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                        start = end
            if ch == "'":
                # escape a single quote by doubling it
                data = "''"
                self.column += 2
                if bool(self.encoding):
                    data = data.encode(self.encoding)
                self.stream.write(data)
                start = end + 1
            if ch is not None:
                spaces = ch == ' '
                breaks = ch in '\n\x85\u2028\u2029'
            end += 1
        self.write_indicator("'", False)

    # Map of characters to their short escape letter in double-quoted scalars.
    ESCAPE_REPLACEMENTS = {
        '\0': '0',
        '\x07': 'a',
        '\x08': 'b',
        '\x09': 't',
        '\x0A': 'n',
        '\x0B': 'v',
        '\x0C': 'f',
        '\x0D': 'r',
        '\x1B': 'e',
        '"': '"',
        '\\': '\\',
        '\x85': 'N',
        '\xA0': '_',
        '\u2028': 'L',
        '\u2029': 'P',
    }

    def write_double_quoted(self, text, split=True):
        # type: (Any, Any) -> None
        """Write ``text`` as a double-quoted scalar, escaping non-printable /
        non-allowed characters (\\x.., \\u...., \\U........) and folding long
        lines with a trailing backslash when ``split`` is true."""
        if self.root_context:
            if self.requested_indent is not None:
                self.write_line_break()
                if self.requested_indent != 0:
                    self.write_indent()
        self.write_indicator('"', True)
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if (
                ch is None
                or ch in '"\\\x85\u2028\u2029\uFEFF'
                or not (
                    '\x20' <= ch <= '\x7E'
                    or (
                        self.allow_unicode
                        and ('\xA0' <= ch <= '\uD7FF' or '\uE000' <= ch <= '\uFFFD')
                    )
                )
            ):
                if start < end:
                    data = text[start:end]
                    self.column += len(data)
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end
                if ch is not None:
                    if ch in self.ESCAPE_REPLACEMENTS:
                        data = '\\' + self.ESCAPE_REPLACEMENTS[ch]
                    elif ch <= '\xFF':
                        data = _F('\\x{ord_ch:02X}', ord_ch=ord(ch))
                    elif ch <= '\uFFFF':
                        data = _F('\\u{ord_ch:04X}', ord_ch=ord(ch))
                    else:
                        data = _F('\\U{ord_ch:08X}', ord_ch=ord(ch))
                    self.column += len(data)
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end + 1
            if (
                0 < end < len(text) - 1
                and (ch == ' ' or start >= end)
                and self.column + (end - start) > self.best_width
                and split
            ):
                # fold the line with a trailing '\' continuation
                data = text[start:end] + '\\'
                if start < end:
                    start = end
                self.column += len(data)
                if bool(self.encoding):
                    data = data.encode(self.encoding)
                self.stream.write(data)
                self.write_indent()
                self.whitespace = False
                self.indention = False
                if text[start] == ' ':
                    # protect a leading space on the continuation line
                    data = '\\'
                    self.column += len(data)
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
            end += 1
        self.write_indicator('"', False)

    def determine_block_hints(self, text):
        # type: (Any) -> Any
        """Compute the block scalar header: an explicit indentation digit when
        the text starts with whitespace (or could be confused with '---'/'...'
        at root), and a chomping indicator ('-' strip / '+' keep)."""
        indent = 0
        indicator = ''
        hints = ''
        if text:
            if text[0] in ' \n\x85\u2028\u2029':
                indent = self.best_sequence_indent
                hints += str(indent)
            elif self.root_context:
                for end in ['\n---', '\n...']:
                    pos = 0
                    while True:
                        pos = text.find(end, pos)
                        if pos == -1:
                            break
                        try:
                            if text[pos + 4] in ' \r\n':
                                break
                        except IndexError:
                            pass
                        pos += 1
                    if pos > -1:
                        break
                if pos > 0:
                    indent = self.best_sequence_indent
            if text[-1] not in '\n\x85\u2028\u2029':
                indicator = '-'
            elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029':
                indicator = '+'
        hints += indicator
        return hints, indent, indicator

    def write_folded(self, text):
        # type: (Any) -> None
        """Write ``text`` as a folded block scalar ('>'); '\\a' in the text is
        an internal marker forcing a fold point."""
        hints, _indent, _indicator = self.determine_block_hints(text)
        self.write_indicator('>' + hints, True)
        if _indicator == '+':
            self.open_ended = True
        self.write_line_break()
        leading_space = True
        spaces = False
        breaks = True
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if breaks:
                if ch is None or ch not in '\n\x85\u2028\u2029\a':
                    if (
                        not leading_space
                        and ch is not None
                        and ch != ' '
                        and text[start] == '\n'
                    ):
                        self.write_line_break()
                    leading_space = ch == ' '
                    for br in text[start:end]:
                        if br == '\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    if ch is not None:
                        self.write_indent()
                    start = end
            elif spaces:
                if ch != ' ':
                    if start + 1 == end and self.column > self.best_width:
                        self.write_indent()
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if bool(self.encoding):
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    start = end
            else:
                if ch is None or ch in ' \n\x85\u2028\u2029\a':
                    data = text[start:end]
                    self.column += len(data)
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    if ch == '\a':
                        if end < (len(text) - 1) and not text[end + 2].isspace():
                            self.write_line_break()
                            self.write_indent()
                            end += 2  # \a and the space that is inserted on the fold
                        else:
                            # NOTE(review): 'unexcpected' is a typo for 'unexpected';
                            # left unchanged here since it is runtime output.
                            raise EmitterError(
                                'unexcpected fold indicator \\a before space'
                            )
                    if ch is None:
                        self.write_line_break()
                    start = end
            if ch is not None:
                breaks = ch in '\n\x85\u2028\u2029'
                spaces = ch == ' '
            end += 1

    def write_literal(self, text, comment=None):
        # type: (Any, Any) -> None
        """Write ``text`` as a literal block scalar ('|'); ``comment`` (a str,
        anything else is ignored) is appended to the header line."""
        hints, _indent, _indicator = self.determine_block_hints(text)
        # if comment is not None:
        #     try:
        #         hints += comment[1][0]
        #     except (TypeError, IndexError) as e:
        #         pass
        if not isinstance(comment, str):
            comment = ''
        self.write_indicator('|' + hints + comment, True)
        # try:
        #     nprintf('selfev', comment)
        #     cmx = comment[1][0]
        #     if cmx:
        #         self.stream.write(cmx)
        # except (TypeError, IndexError) as e:
        #     pass
        if _indicator == '+':
            self.open_ended = True
        self.write_line_break()
        breaks = True
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if breaks:
                if ch is None or ch not in '\n\x85\u2028\u2029':
                    for br in text[start:end]:
                        if br == '\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    if ch is not None:
                        if self.root_context:
                            idnx = self.indent if self.indent is not None else 0
                            self.stream.write(' ' * (_indent + idnx))
                        else:
                            self.write_indent()
                    start = end
            else:
                if ch is None or ch in '\n\x85\u2028\u2029':
                    data = text[start:end]
                    if bool(self.encoding):
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    if ch is None:
                        self.write_line_break()
                    start = end
            if ch is not None:
                breaks = ch in '\n\x85\u2028\u2029'
            end += 1

    def write_plain(self, text, split=True):
        # type: (Any, Any) -> None
        """Write ``text`` as a plain (unquoted) scalar, folding at spaces when
        ``split`` is true and the line exceeds ``best_width``."""
        if self.root_context:
            if self.requested_indent is not None:
                self.write_line_break()
                if self.requested_indent != 0:
                    self.write_indent()
            else:
                # a plain scalar at root leaves the document 'open ended'
                self.open_ended = True
        if not text:
            return
        if not self.whitespace:
            data = ' '
            self.column += len(data)
            if self.encoding:
                data = data.encode(self.encoding)  # type: ignore
            self.stream.write(data)
        self.whitespace = False
        self.indention = False
        spaces = False
        breaks = False
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if spaces:
                if ch != ' ':
                    if start + 1 == end and self.column > self.best_width and split:
                        self.write_indent()
                        self.whitespace = False
                        self.indention = False
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if self.encoding:
                            data = data.encode(self.encoding)  # type: ignore
                        self.stream.write(data)
                    start = end
            elif breaks:
                if ch not in '\n\x85\u2028\u2029':  # type: ignore
                    if text[start] == '\n':
                        self.write_line_break()
                    for br in text[start:end]:
                        if br == '\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    self.write_indent()
                    self.whitespace = False
                    self.indention = False
                    start = end
            else:
                if ch is None or ch in ' \n\x85\u2028\u2029':
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)  # type: ignore
                    try:
                        self.stream.write(data)
                    except:  # NOQA
                        sys.stdout.write(repr(data) + '\n')
                        raise
                    start = end
            if ch is not None:
                spaces = ch == ' '
                breaks = ch in '\n\x85\u2028\u2029'
            end += 1

    def write_comment(self, comment, pre=False):
        # type: (Any, bool) -> None
        """Write a single comment token, padding with spaces to (roughly) its
        original column; ``pre`` marks full-line comments (no trailing break
        handling of an end-of-line comment)."""
        value = comment.value
        # nprintf('{:02d} {:02d} {!r}'.format(self.column, comment.start_mark.column, value))
        if not pre and value[-1] == '\n':
            value = value[:-1]
        try:
            # get original column position
            col = comment.start_mark.column
            if comment.value and comment.value.startswith('\n'):
                # never inject extra spaces if the comment starts with a newline
                # and not a real comment (e.g. if you have an empty line following a key-value
                col = self.column
            elif col < self.column + 1:
                # NOTE(review): bare 'ValueError' is a no-op expression; presumably
                # 'raise ValueError' was intended so the except below sets
                # col = self.column + 1 — TODO confirm against upstream intent.
                ValueError
        except ValueError:
            col = self.column + 1
        # nprint('post_comment', self.line, self.column, value)
        try:
            # at least one space if the current column >= the start column of the comment
            # but not at the start of a line
            nr_spaces = col - self.column
            if self.column and value.strip() and nr_spaces < 1 and value[0] != '\n':
                nr_spaces = 1
            value = ' ' * nr_spaces + value
            try:
                if bool(self.encoding):
                    value = value.encode(self.encoding)
            except UnicodeDecodeError:
                pass
            self.stream.write(value)
        except TypeError:
            raise
        if not pre:
            self.write_line_break()

    def write_pre_comment(self, event):
        # type: (Any) -> bool
        """Write the comments that precede ``event`` (event.comment[1]), each on
        its own line; collection-start events remember which comments were
        already written via the ``pre_done`` flag. Returns True when the event
        carried pre-comments."""
        comments = event.comment[1]
        if comments is None:
            return False
        try:
            start_events = (MappingStartEvent, SequenceStartEvent)
            for comment in comments:
                if isinstance(event, start_events) and getattr(
                    comment, 'pre_done', None
                ):
                    continue
                if self.column != 0:
                    self.write_line_break()
                self.write_comment(comment, pre=True)
                if isinstance(event, start_events):
                    comment.pre_done = True
        except TypeError:
            sys.stdout.write('eventtt {} {}'.format(type(event), event))
            raise
        return True

    def write_post_comment(self, event):
        # type: (Any) -> bool
        """Write the end-of-line comment attached to ``event`` (event.comment[0]).

        NOTE(review): the guard inspects ``self.event.comment`` but then reads
        ``event.comment`` — consistent only when callers pass self.event (which
        all call sites here do); TODO confirm and unify on ``event``.
        """
        if self.event.comment[0] is None:
            return False
        comment = event.comment[0]
        self.write_comment(comment)
        return True
diff --git a/lib/ruyaml/error.py b/lib/ruyaml/error.py
new file mode 100644
index 0000000..7b04a00
--- /dev/null
+++ b/lib/ruyaml/error.py
@@ -0,0 +1,334 @@
+# coding: utf-8
+
+import textwrap
+import warnings
+
+from ruyaml.compat import _F
+
+if False:  # MYPY
+    from typing import Any, Dict, List, Optional, Text  # NOQA
+
+
+__all__ = [
+    'FileMark',
+    'StringMark',
+    'CommentMark',
+    'YAMLError',
+    'MarkedYAMLError',
+    'ReusedAnchorWarning',
+    'UnsafeLoaderWarning',
'MarkedYAMLWarning', + 'MarkedYAMLFutureWarning', +] + + +class StreamMark: + __slots__ = 'name', 'index', 'line', 'column' + + def __init__(self, name, index, line, column): + # type: (Any, int, int, int) -> None + self.name = name + self.index = index + self.line = line + self.column = column + + def __str__(self): + # type: () -> Any + where = _F( + ' in "{sname!s}", line {sline1:d}, column {scolumn1:d}', + sname=self.name, + sline1=self.line + 1, + scolumn1=self.column + 1, + ) + return where + + def __eq__(self, other): + # type: (Any) -> bool + if self.line != other.line or self.column != other.column: + return False + if self.name != other.name or self.index != other.index: + return False + return True + + def __ne__(self, other): + # type: (Any) -> bool + return not self.__eq__(other) + + +class FileMark(StreamMark): + __slots__ = () + + +class StringMark(StreamMark): + __slots__ = 'name', 'index', 'line', 'column', 'buffer', 'pointer' + + def __init__(self, name, index, line, column, buffer, pointer): + # type: (Any, int, int, int, Any, Any) -> None + StreamMark.__init__(self, name, index, line, column) + self.buffer = buffer + self.pointer = pointer + + def get_snippet(self, indent=4, max_length=75): + # type: (int, int) -> Any + if self.buffer is None: # always False + return None + head = "" + start = self.pointer + while start > 0 and self.buffer[start - 1] not in '\0\r\n\x85\u2028\u2029': + start -= 1 + if self.pointer - start > max_length / 2 - 1: + head = ' ... ' + start += 5 + break + tail = "" + end = self.pointer + while ( + end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029' + ): + end += 1 + if end - self.pointer > max_length / 2 - 1: + tail = ' ... 
' + end -= 5 + break + snippet = self.buffer[start:end] + caret = '^' + caret = '^ (line: {})'.format(self.line + 1) + return ( + ' ' * indent + + head + + snippet + + tail + + '\n' + + ' ' * (indent + self.pointer - start + len(head)) + + caret + ) + + def __str__(self): + # type: () -> Any + snippet = self.get_snippet() + where = _F( + ' in "{sname!s}", line {sline1:d}, column {scolumn1:d}', + sname=self.name, + sline1=self.line + 1, + scolumn1=self.column + 1, + ) + if snippet is not None: + where += ':\n' + snippet + return where + + def __repr__(self): + # type: () -> Any + snippet = self.get_snippet() + where = _F( + ' in "{sname!s}", line {sline1:d}, column {scolumn1:d}', + sname=self.name, + sline1=self.line + 1, + scolumn1=self.column + 1, + ) + if snippet is not None: + where += ':\n' + snippet + return where + + +class CommentMark: + __slots__ = ('column',) + + def __init__(self, column): + # type: (Any) -> None + self.column = column + + +class YAMLError(Exception): + pass + + +class MarkedYAMLError(YAMLError): + def __init__( + self, + context=None, + context_mark=None, + problem=None, + problem_mark=None, + note=None, + warn=None, + ): + # type: (Any, Any, Any, Any, Any, Any) -> None + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + # warn is ignored + + def __str__(self): + # type: () -> Any + lines = [] # type: List[str] + if self.context is not None: + lines.append(self.context) + if self.context_mark is not None and ( + self.problem is None + or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column + ): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None and self.note: + note 
= textwrap.dedent(self.note) + lines.append(note) + return '\n'.join(lines) + + +class YAMLStreamError(Exception): + pass + + +class YAMLWarning(Warning): + pass + + +class MarkedYAMLWarning(YAMLWarning): + def __init__( + self, + context=None, + context_mark=None, + problem=None, + problem_mark=None, + note=None, + warn=None, + ): + # type: (Any, Any, Any, Any, Any, Any) -> None + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + self.warn = warn + + def __str__(self): + # type: () -> Any + lines = [] # type: List[str] + if self.context is not None: + lines.append(self.context) + if self.context_mark is not None and ( + self.problem is None + or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column + ): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None and self.note: + note = textwrap.dedent(self.note) + lines.append(note) + if self.warn is not None and self.warn: + warn = textwrap.dedent(self.warn) + lines.append(warn) + return '\n'.join(lines) + + +class ReusedAnchorWarning(YAMLWarning): + pass + + +class UnsafeLoaderWarning(YAMLWarning): + text = """ +The default 'Loader' for 'load(stream)' without further arguments can be unsafe. +Use 'load(stream, Loader=ruyaml.Loader)' explicitly if that is OK. 
+Alternatively include the following in your code: + + import warnings + warnings.simplefilter('ignore', ruyaml.error.UnsafeLoaderWarning) + +In most other cases you should consider using 'safe_load(stream)'""" + pass + + +warnings.simplefilter('once', UnsafeLoaderWarning) + + +class MantissaNoDotYAML1_1Warning(YAMLWarning): + def __init__(self, node, flt_str): + # type: (Any, Any) -> None + self.node = node + self.flt = flt_str + + def __str__(self): + # type: () -> Any + line = self.node.start_mark.line + col = self.node.start_mark.column + return """ +In YAML 1.1 floating point values should have a dot ('.') in their mantissa. +See the Floating-Point Language-Independent Type for YAMLâ„¢ Version 1.1 specification +( http://yaml.org/type/float.html ). This dot is not required for JSON nor for YAML 1.2 + +Correct your float: "{}" on line: {}, column: {} + +or alternatively include the following in your code: + + import warnings + warnings.simplefilter('ignore', ruyaml.error.MantissaNoDotYAML1_1Warning) + +""".format( + self.flt, line, col + ) + + +warnings.simplefilter('once', MantissaNoDotYAML1_1Warning) + + +class YAMLFutureWarning(Warning): + pass + + +class MarkedYAMLFutureWarning(YAMLFutureWarning): + def __init__( + self, + context=None, + context_mark=None, + problem=None, + problem_mark=None, + note=None, + warn=None, + ): + # type: (Any, Any, Any, Any, Any, Any) -> None + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + self.warn = warn + + def __str__(self): + # type: () -> Any + lines = [] # type: List[str] + if self.context is not None: + lines.append(self.context) + + if self.context_mark is not None and ( + self.problem is None + or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column + ): + 
lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None and self.note: + note = textwrap.dedent(self.note) + lines.append(note) + if self.warn is not None and self.warn: + warn = textwrap.dedent(self.warn) + lines.append(warn) + return '\n'.join(lines) diff --git a/lib/ruyaml/events.py b/lib/ruyaml/events.py new file mode 100644 index 0000000..558d2db --- /dev/null +++ b/lib/ruyaml/events.py @@ -0,0 +1,201 @@ +# coding: utf-8 + +from ruyaml.compat import _F + +# Abstract classes. + +if False: # MYPY + from typing import Any, Dict, List, Optional # NOQA + +SHOW_LINES = False + + +def CommentCheck(): + # type: () -> None + pass + + +class Event: + __slots__ = 'start_mark', 'end_mark', 'comment' + + def __init__(self, start_mark=None, end_mark=None, comment=CommentCheck): + # type: (Any, Any, Any) -> None + self.start_mark = start_mark + self.end_mark = end_mark + # assert comment is not CommentCheck + if comment is CommentCheck: + comment = None + self.comment = comment + + def __repr__(self): + # type: () -> Any + if True: + arguments = [] + if hasattr(self, 'value'): + # if you use repr(getattr(self, 'value')) then flake8 complains about + # abuse of getattr with a constant. 
When you change to self.value + # then mypy throws an error + arguments.append(repr(self.value)) # type: ignore + for key in ['anchor', 'tag', 'implicit', 'flow_style', 'style']: + v = getattr(self, key, None) + if v is not None: + arguments.append(_F('{key!s}={v!r}', key=key, v=v)) + if self.comment not in [None, CommentCheck]: + arguments.append('comment={!r}'.format(self.comment)) + if SHOW_LINES: + arguments.append( + '({}:{}/{}:{})'.format( + self.start_mark.line, # type: ignore + self.start_mark.column, # type: ignore + self.end_mark.line, # type: ignore + self.end_mark.column, # type: ignore + ) + ) + arguments = ', '.join(arguments) # type: ignore + else: + attributes = [ + key + for key in ['anchor', 'tag', 'implicit', 'value', 'flow_style', 'style'] + if hasattr(self, key) + ] + arguments = ', '.join( + [ + _F('{k!s}={attr!r}', k=key, attr=getattr(self, key)) + for key in attributes + ] + ) + if self.comment not in [None, CommentCheck]: + arguments += ', comment={!r}'.format(self.comment) + return _F( + '{self_class_name!s}({arguments!s})', + self_class_name=self.__class__.__name__, + arguments=arguments, + ) + + +class NodeEvent(Event): + __slots__ = ('anchor',) + + def __init__(self, anchor, start_mark=None, end_mark=None, comment=None): + # type: (Any, Any, Any, Any) -> None + Event.__init__(self, start_mark, end_mark, comment) + self.anchor = anchor + + +class CollectionStartEvent(NodeEvent): + __slots__ = 'tag', 'implicit', 'flow_style', 'nr_items' + + def __init__( + self, + anchor, + tag, + implicit, + start_mark=None, + end_mark=None, + flow_style=None, + comment=None, + nr_items=None, + ): + # type: (Any, Any, Any, Any, Any, Any, Any, Optional[int]) -> None + NodeEvent.__init__(self, anchor, start_mark, end_mark, comment) + self.tag = tag + self.implicit = implicit + self.flow_style = flow_style + self.nr_items = nr_items + + +class CollectionEndEvent(Event): + __slots__ = () + + +# Implementations. 
+ + +class StreamStartEvent(Event): + __slots__ = ('encoding',) + + def __init__(self, start_mark=None, end_mark=None, encoding=None, comment=None): + # type: (Any, Any, Any, Any) -> None + Event.__init__(self, start_mark, end_mark, comment) + self.encoding = encoding + + +class StreamEndEvent(Event): + __slots__ = () + + +class DocumentStartEvent(Event): + __slots__ = 'explicit', 'version', 'tags' + + def __init__( + self, + start_mark=None, + end_mark=None, + explicit=None, + version=None, + tags=None, + comment=None, + ): + # type: (Any, Any, Any, Any, Any, Any) -> None + Event.__init__(self, start_mark, end_mark, comment) + self.explicit = explicit + self.version = version + self.tags = tags + + +class DocumentEndEvent(Event): + __slots__ = ('explicit',) + + def __init__(self, start_mark=None, end_mark=None, explicit=None, comment=None): + # type: (Any, Any, Any, Any) -> None + Event.__init__(self, start_mark, end_mark, comment) + self.explicit = explicit + + +class AliasEvent(NodeEvent): + __slots__ = 'style' + + def __init__( + self, anchor, start_mark=None, end_mark=None, style=None, comment=None + ): + # type: (Any, Any, Any, Any, Any) -> None + NodeEvent.__init__(self, anchor, start_mark, end_mark, comment) + self.style = style + + +class ScalarEvent(NodeEvent): + __slots__ = 'tag', 'implicit', 'value', 'style' + + def __init__( + self, + anchor, + tag, + implicit, + value, + start_mark=None, + end_mark=None, + style=None, + comment=None, + ): + # type: (Any, Any, Any, Any, Any, Any, Any, Any) -> None + NodeEvent.__init__(self, anchor, start_mark, end_mark, comment) + self.tag = tag + self.implicit = implicit + self.value = value + self.style = style + + +class SequenceStartEvent(CollectionStartEvent): + __slots__ = () + + +class SequenceEndEvent(CollectionEndEvent): + __slots__ = () + + +class MappingStartEvent(CollectionStartEvent): + __slots__ = () + + +class MappingEndEvent(CollectionEndEvent): + __slots__ = () diff --git a/lib/ruyaml/loader.py 
b/lib/ruyaml/loader.py new file mode 100644 index 0000000..f41a889 --- /dev/null +++ b/lib/ruyaml/loader.py @@ -0,0 +1,78 @@ +# coding: utf-8 + +from ruyaml.composer import Composer +from ruyaml.constructor import ( + BaseConstructor, + Constructor, + RoundTripConstructor, + SafeConstructor, +) +from ruyaml.parser import Parser, RoundTripParser +from ruyaml.reader import Reader +from ruyaml.resolver import VersionedResolver +from ruyaml.scanner import RoundTripScanner, Scanner + +if False: # MYPY + from typing import Any, Dict, List, Optional, Union # NOQA + + from ruyaml.compat import StreamTextType, VersionType # NOQA + +__all__ = ['BaseLoader', 'SafeLoader', 'Loader', 'RoundTripLoader'] + + +class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, VersionedResolver): + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + self.comment_handling = None + Reader.__init__(self, stream, loader=self) + Scanner.__init__(self, loader=self) + Parser.__init__(self, loader=self) + Composer.__init__(self, loader=self) + BaseConstructor.__init__(self, loader=self) + VersionedResolver.__init__(self, version, loader=self) + + +class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, VersionedResolver): + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + self.comment_handling = None + Reader.__init__(self, stream, loader=self) + Scanner.__init__(self, loader=self) + Parser.__init__(self, loader=self) + Composer.__init__(self, loader=self) + SafeConstructor.__init__(self, loader=self) + VersionedResolver.__init__(self, version, loader=self) + + +class Loader(Reader, Scanner, Parser, Composer, Constructor, VersionedResolver): + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + 
self.comment_handling = None + Reader.__init__(self, stream, loader=self) + Scanner.__init__(self, loader=self) + Parser.__init__(self, loader=self) + Composer.__init__(self, loader=self) + Constructor.__init__(self, loader=self) + VersionedResolver.__init__(self, version, loader=self) + + +class RoundTripLoader( + Reader, + RoundTripScanner, + RoundTripParser, + Composer, + RoundTripConstructor, + VersionedResolver, +): + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + # self.reader = Reader.__init__(self, stream) + self.comment_handling = None # issue 385 + Reader.__init__(self, stream, loader=self) + RoundTripScanner.__init__(self, loader=self) + RoundTripParser.__init__(self, loader=self) + Composer.__init__(self, loader=self) + RoundTripConstructor.__init__( + self, preserve_quotes=preserve_quotes, loader=self + ) + VersionedResolver.__init__(self, version, loader=self) diff --git a/lib/ruyaml/main.py b/lib/ruyaml/main.py new file mode 100644 index 0000000..1b5f06c --- /dev/null +++ b/lib/ruyaml/main.py @@ -0,0 +1,1702 @@ +# coding: utf-8 + +import glob +import os +import sys +import warnings +from importlib import import_module +from io import BytesIO, StringIO +from typing import Any, List, Optional, Text, Union + +import ruyaml +from ruyaml.comments import C_PRE, CommentedMap, CommentedSeq +from ruyaml.compat import ( # NOQA + StreamTextType, + StreamType, + VersionType, + nprint, + nprintf, +) +from ruyaml.constructor import ( + BaseConstructor, + Constructor, + RoundTripConstructor, + SafeConstructor, +) +from ruyaml.dumper import BaseDumper, Dumper, RoundTripDumper, SafeDumper # NOQA +from ruyaml.error import UnsafeLoaderWarning, YAMLError # NOQA +from ruyaml.events import * # NOQA +from ruyaml.loader import BaseLoader # NOQA; NOQA +from ruyaml.loader import Loader +from ruyaml.loader import Loader as UnsafeLoader +from ruyaml.loader import RoundTripLoader, 
SafeLoader +from ruyaml.nodes import * # NOQA +from ruyaml.representer import ( + BaseRepresenter, + Representer, + RoundTripRepresenter, + SafeRepresenter, +) +from ruyaml.resolver import Resolver, VersionedResolver # NOQA +from ruyaml.tokens import * # NOQA + +if False: # MYPY + from pathlib import Path + from typing import Any, Callable, Dict, List, Optional, Set, Text, Union # NOQA + + from ruyaml.compat import StreamTextType, StreamType, VersionType # NOQA + +try: + from _ruyaml import CEmitter, CParser # type: ignore +except: # NOQA + CParser = CEmitter = None + +# import io + + +# YAML is an acronym, i.e. spoken: rhymes with "camel". And thus a +# subset of abbreviations, which should be all caps according to PEP8 + + +class YAML: + def __init__( + self, + *, + typ=None, + pure=False, + output=None, + plug_ins=None, # input=None, + ): + # type: (Any, Optional[Text], Any, Any, Any) -> None + """ + typ: 'rt'/None -> RoundTripLoader/RoundTripDumper, (default) + 'safe' -> SafeLoader/SafeDumper, + 'unsafe' -> normal/unsafe Loader/Dumper + 'base' -> baseloader + pure: if True only use Python modules + input/output: needed to work as context manager + plug_ins: a list of plug-in files + """ + + self.typ = ['rt'] if typ is None else (typ if isinstance(typ, list) else [typ]) + self.pure = pure + + # self._input = input + self._output = output + self._context_manager = None # type: Any + + self.plug_ins = [] # type: List[Any] + for pu in ([] if plug_ins is None else plug_ins) + self.official_plug_ins(): + file_name = pu.replace(os.sep, '.') + self.plug_ins.append(import_module(file_name)) + self.Resolver = ruyaml.resolver.VersionedResolver # type: Any + self.allow_unicode = True + self.Reader = None # type: Any + self.Representer = None # type: Any + self.Constructor = None # type: Any + self.Scanner = None # type: Any + self.Serializer = None # type: Any + self.default_flow_style = None # type: Any + self.comment_handling = None + typ_found = 1 + setup_rt = False + 
if 'rt' in self.typ: + setup_rt = True + elif 'safe' in self.typ: + self.Emitter = ( + ruyaml.emitter.Emitter if pure or CEmitter is None else CEmitter + ) + self.Representer = ruyaml.representer.SafeRepresenter + self.Parser = ruyaml.parser.Parser if pure or CParser is None else CParser + self.Composer = ruyaml.composer.Composer + self.Constructor = ruyaml.constructor.SafeConstructor + elif 'base' in self.typ: + self.Emitter = ruyaml.emitter.Emitter + self.Representer = ruyaml.representer.BaseRepresenter + self.Parser = ruyaml.parser.Parser if pure or CParser is None else CParser + self.Composer = ruyaml.composer.Composer + self.Constructor = ruyaml.constructor.BaseConstructor + elif 'unsafe' in self.typ: + self.Emitter = ( + ruyaml.emitter.Emitter if pure or CEmitter is None else CEmitter + ) + self.Representer = ruyaml.representer.Representer + self.Parser = ruyaml.parser.Parser if pure or CParser is None else CParser + self.Composer = ruyaml.composer.Composer + self.Constructor = ruyaml.constructor.Constructor + elif 'rtsc' in self.typ: + self.default_flow_style = False + # no optimized rt-dumper yet + self.Emitter = ruyaml.emitter.Emitter + self.Serializer = ruyaml.serializer.Serializer + self.Representer = ruyaml.representer.RoundTripRepresenter + self.Scanner = ruyaml.scanner.RoundTripScannerSC + # no optimized rt-parser yet + self.Parser = ruyaml.parser.RoundTripParserSC + self.Composer = ruyaml.composer.Composer + self.Constructor = ruyaml.constructor.RoundTripConstructor + self.comment_handling = C_PRE + else: + setup_rt = True + typ_found = 0 + if setup_rt: + self.default_flow_style = False + # no optimized rt-dumper yet + self.Emitter = ruyaml.emitter.Emitter + self.Serializer = ruyaml.serializer.Serializer + self.Representer = ruyaml.representer.RoundTripRepresenter + self.Scanner = ruyaml.scanner.RoundTripScanner + # no optimized rt-parser yet + self.Parser = ruyaml.parser.RoundTripParser + self.Composer = ruyaml.composer.Composer + self.Constructor = 
ruyaml.constructor.RoundTripConstructor + del setup_rt + self.stream = None + self.canonical = None + self.old_indent = None + self.width = None + self.line_break = None + + self.map_indent = None + self.sequence_indent = None + self.sequence_dash_offset = 0 + self.compact_seq_seq = None + self.compact_seq_map = None + self.sort_base_mapping_type_on_output = None # default: sort + + self.top_level_colon_align = None + self.prefix_colon = None + self.version = None + self.preserve_quotes = None + self.allow_duplicate_keys = False # duplicate keys in map, set + self.encoding = 'utf-8' + self.explicit_start = None + self.explicit_end = None + self.tags = None + self.default_style = None + self.top_level_block_style_scalar_no_indent_error_1_1 = False + # directives end indicator with single scalar document + self.scalar_after_indicator = None + # [a, b: 1, c: {d: 2}] vs. [a, {b: 1}, {c: {d: 2}}] + self.brace_single_entry_mapping_in_flow_sequence = False + for module in self.plug_ins: + if getattr(module, 'typ', None) in self.typ: + typ_found += 1 + module.init_typ(self) + break + if typ_found == 0: + raise NotImplementedError( + 'typ "{}"not recognised (need to install plug-in?)'.format(self.typ) + ) + + @property + def reader(self): + # type: () -> Any + try: + return self._reader # type: ignore + except AttributeError: + self._reader = self.Reader(None, loader=self) + return self._reader + + @property + def scanner(self): + # type: () -> Any + try: + return self._scanner # type: ignore + except AttributeError: + self._scanner = self.Scanner(loader=self) + return self._scanner + + @property + def parser(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + if self.Parser is not CParser: + setattr(self, attr, self.Parser(loader=self)) + else: + if getattr(self, '_stream', None) is None: + # wait for the stream + return None + else: + # if not hasattr(self._stream, 'read') and hasattr(self._stream, 'open'): + # # 
pathlib.Path() instance + # setattr(self, attr, CParser(self._stream)) + # else: + setattr(self, attr, CParser(self._stream)) + # self._parser = self._composer = self + # nprint('scanner', self.loader.scanner) + + return getattr(self, attr) + + @property + def composer(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + setattr(self, attr, self.Composer(loader=self)) + return getattr(self, attr) + + @property + def constructor(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + cnst = self.Constructor(preserve_quotes=self.preserve_quotes, loader=self) + cnst.allow_duplicate_keys = self.allow_duplicate_keys + setattr(self, attr, cnst) + return getattr(self, attr) + + @property + def resolver(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + setattr(self, attr, self.Resolver(version=self.version, loader=self)) + return getattr(self, attr) + + @property + def emitter(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + if self.Emitter is not CEmitter: + _emitter = self.Emitter( + None, + canonical=self.canonical, + indent=self.old_indent, + width=self.width, + allow_unicode=self.allow_unicode, + line_break=self.line_break, + prefix_colon=self.prefix_colon, + brace_single_entry_mapping_in_flow_sequence=self.brace_single_entry_mapping_in_flow_sequence, # NOQA + dumper=self, + ) + setattr(self, attr, _emitter) + if self.map_indent is not None: + _emitter.best_map_indent = self.map_indent + if self.sequence_indent is not None: + _emitter.best_sequence_indent = self.sequence_indent + if self.sequence_dash_offset is not None: + _emitter.sequence_dash_offset = self.sequence_dash_offset + # _emitter.block_seq_indent = self.sequence_dash_offset + if self.compact_seq_seq is not None: + _emitter.compact_seq_seq = self.compact_seq_seq + if self.compact_seq_map is not 
None: + _emitter.compact_seq_map = self.compact_seq_map + else: + if getattr(self, '_stream', None) is None: + # wait for the stream + return None + return None + return getattr(self, attr) + + @property + def serializer(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + setattr( + self, + attr, + self.Serializer( + encoding=self.encoding, + explicit_start=self.explicit_start, + explicit_end=self.explicit_end, + version=self.version, + tags=self.tags, + dumper=self, + ), + ) + return getattr(self, attr) + + @property + def representer(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + repres = self.Representer( + default_style=self.default_style, + default_flow_style=self.default_flow_style, + dumper=self, + ) + if self.sort_base_mapping_type_on_output is not None: + repres.sort_base_mapping_type_on_output = ( + self.sort_base_mapping_type_on_output + ) + setattr(self, attr, repres) + return getattr(self, attr) + + def scan(self, stream): + # type: (StreamTextType) -> Any + """ + Scan a YAML stream and produce scanning tokens. + """ + if not hasattr(stream, 'read') and hasattr(stream, 'open'): + # pathlib.Path() instance + with stream.open('rb') as fp: + return self.scan(fp) + _, parser = self.get_constructor_parser(stream) + try: + while self.scanner.check_token(): + yield self.scanner.get_token() + finally: + parser.dispose() + try: + self._reader.reset_reader() + except AttributeError: + pass + try: + self._scanner.reset_scanner() + except AttributeError: + pass + + def parse(self, stream): + # type: (StreamTextType) -> Any + """ + Parse a YAML stream and produce parsing events. 
+ """ + if not hasattr(stream, 'read') and hasattr(stream, 'open'): + # pathlib.Path() instance + with stream.open('rb') as fp: + return self.parse(fp) + _, parser = self.get_constructor_parser(stream) + try: + while parser.check_event(): + yield parser.get_event() + finally: + parser.dispose() + try: + self._reader.reset_reader() + except AttributeError: + pass + try: + self._scanner.reset_scanner() + except AttributeError: + pass + + def compose(self, stream): + # type: (Union[Path, StreamTextType]) -> Any + """ + Parse the first YAML document in a stream + and produce the corresponding representation tree. + """ + if not hasattr(stream, 'read') and hasattr(stream, 'open'): + # pathlib.Path() instance + with stream.open('rb') as fp: + return self.load(fp) + constructor, parser = self.get_constructor_parser(stream) + try: + return constructor.composer.get_single_node() + finally: + parser.dispose() + try: + self._reader.reset_reader() + except AttributeError: + pass + try: + self._scanner.reset_scanner() + except AttributeError: + pass + + def compose_all(self, stream): + # type: (Union[Path, StreamTextType]) -> Any + """ + Parse all YAML documents in a stream + and produce corresponding representation trees. + """ + constructor, parser = self.get_constructor_parser(stream) + try: + while constructor.composer.check_node(): + yield constructor.composer.get_node() + finally: + parser.dispose() + try: + self._reader.reset_reader() + except AttributeError: + pass + try: + self._scanner.reset_scanner() + except AttributeError: + pass + + # separate output resolver? 
+ + # def load(self, stream=None): + # if self._context_manager: + # if not self._input: + # raise TypeError("Missing input stream while dumping from context manager") + # for data in self._context_manager.load(): + # yield data + # return + # if stream is None: + # raise TypeError("Need a stream argument when not loading from context manager") + # return self.load_one(stream) + + def load(self, stream): + # type: (Union[Path, StreamTextType]) -> Any + """ + at this point you either have the non-pure Parser (which has its own reader and + scanner) or you have the pure Parser. + If the pure Parser is set, then set the Reader and Scanner, if not already set. + If either the Scanner or Reader are set, you cannot use the non-pure Parser, + so reset it to the pure parser and set the Reader resp. Scanner if necessary + """ + if not hasattr(stream, 'read') and hasattr(stream, 'open'): + # pathlib.Path() instance + with stream.open('rb') as fp: + return self.load(fp) + constructor, parser = self.get_constructor_parser(stream) + try: + return constructor.get_single_data() + finally: + parser.dispose() + try: + self._reader.reset_reader() + except AttributeError: + pass + try: + self._scanner.reset_scanner() + except AttributeError: + pass + + def load_all(self, stream): # *, skip=None): + # type: (Union[Path, StreamTextType]) -> Any + if not hasattr(stream, 'read') and hasattr(stream, 'open'): + # pathlib.Path() instance + with stream.open('r') as fp: + yield from self.load_all(fp) + return + # if skip is None: + # skip = [] + # elif isinstance(skip, int): + # skip = [skip] + constructor, parser = self.get_constructor_parser(stream) + try: + while constructor.check_data(): + yield constructor.get_data() + finally: + parser.dispose() + try: + self._reader.reset_reader() + except AttributeError: + pass + try: + self._scanner.reset_scanner() + except AttributeError: + pass + + def get_constructor_parser(self, stream): + # type: (StreamTextType) -> Any + """ + the old cyaml 
needs special setup, and therefore the stream + """ + if self.Parser is not CParser: + if self.Reader is None: + self.Reader = ruyaml.reader.Reader + if self.Scanner is None: + self.Scanner = ruyaml.scanner.Scanner + self.reader.stream = stream + else: + if self.Reader is not None: + if self.Scanner is None: + self.Scanner = ruyaml.scanner.Scanner + self.Parser = ruyaml.parser.Parser + self.reader.stream = stream + elif self.Scanner is not None: + if self.Reader is None: + self.Reader = ruyaml.reader.Reader + self.Parser = ruyaml.parser.Parser + self.reader.stream = stream + else: + # combined C level reader>scanner>parser + # does some calls to the resolver, e.g. BaseResolver.descend_resolver + # if you just initialise the CParser, to much of resolver.py + # is actually used + rslvr = self.Resolver + # if rslvr is ruyaml.resolver.VersionedResolver: + # rslvr = ruyaml.resolver.Resolver + + class XLoader(self.Parser, self.Constructor, rslvr): # type: ignore + def __init__( + selfx, stream, version=self.version, preserve_quotes=None + ): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None # NOQA + CParser.__init__(selfx, stream) + selfx._parser = selfx._composer = selfx + self.Constructor.__init__(selfx, loader=selfx) + selfx.allow_duplicate_keys = self.allow_duplicate_keys + rslvr.__init__(selfx, version=version, loadumper=selfx) + + self._stream = stream + loader = XLoader(stream) + return loader, loader + return self.constructor, self.parser + + def emit(self, events, stream): + # type: (Any, Any) -> None + """ + Emit YAML parsing events into a stream. + If stream is None, return the produced string instead. 
+ """ + _, _, emitter = self.get_serializer_representer_emitter(stream, None) + try: + for event in events: + emitter.emit(event) + finally: + try: + emitter.dispose() + except AttributeError: + raise + + def serialize(self, node, stream): + # type: (Any, Optional[StreamType]) -> Any + """ + Serialize a representation tree into a YAML stream. + If stream is None, return the produced string instead. + """ + self.serialize_all([node], stream) + + def serialize_all(self, nodes, stream): + # type: (Any, Optional[StreamType]) -> Any + """ + Serialize a sequence of representation trees into a YAML stream. + If stream is None, return the produced string instead. + """ + serializer, _, emitter = self.get_serializer_representer_emitter(stream, None) + try: + serializer.open() + for node in nodes: + serializer.serialize(node) + serializer.close() + finally: + try: + emitter.dispose() + except AttributeError: + raise + + def dump(self, data, stream=None, *, transform=None): + # type: (Any, Union[Path, StreamType], Any, Any) -> Any + if self._context_manager: + if not self._output: + raise TypeError( + 'Missing output stream while dumping from context manager' + ) + if transform is not None: + raise TypeError( + '{}.dump() in the context manager cannot have transform keyword ' + ''.format(self.__class__.__name__) + ) + self._context_manager.dump(data) + else: # old style + if stream is None: + raise TypeError( + 'Need a stream argument when not dumping from context manager' + ) + return self.dump_all([data], stream, transform=transform) + + def dump_all(self, documents, stream, *, transform=None): + # type: (Any, StreamType, Any) -> Any + if self._context_manager: + raise NotImplementedError + self._output = stream + self._context_manager = YAMLContextManager(self, transform=transform) + for data in documents: + self._context_manager.dump(data) + self._context_manager.teardown_output() + self._output = None + self._context_manager = None + + def Xdump_all(self, documents, 
stream, *, transform=None): + # type: (Any, Any, Any) -> Any + """ + Serialize a sequence of Python objects into a YAML stream. + """ + if not hasattr(stream, 'write') and hasattr(stream, 'open'): + # pathlib.Path() instance + with stream.open('w') as fp: + return self.dump_all(documents, fp, transform=transform) + # The stream should have the methods `write` and possibly `flush`. + + documents: StreamType = documents # mypy workaround + + if self.top_level_colon_align is True: + tlca = max([len(str(x)) for x in documents[0]]) # type: Any # NOQA + else: + tlca = self.top_level_colon_align + if transform is not None: + fstream = stream + if self.encoding is None: + stream = StringIO() + else: + stream = BytesIO() + serializer, representer, emitter = self.get_serializer_representer_emitter( + stream, tlca + ) + try: + self.serializer.open() + for data in documents: # NOQA + try: + self.representer.represent(data) + except AttributeError: + # nprint(dir(dumper._representer)) + raise + self.serializer.close() + finally: + try: + self.emitter.dispose() + except AttributeError: + raise + # self.dumper.dispose() # cyaml + delattr(self, '_serializer') + delattr(self, '_emitter') + if transform: + val = stream.getvalue() # type: ignore + if self.encoding: + val = val.decode(self.encoding) + if fstream is None: + transform(val) + else: + fstream.write(transform(val)) # type: ignore + return None + + def get_serializer_representer_emitter(self, stream, tlca): + # type: (StreamType, Any) -> Any + # we have only .Serializer to deal with (vs .Reader & .Scanner), much simpler + if self.Emitter is not CEmitter: + if self.Serializer is None: + self.Serializer = ruyaml.serializer.Serializer + self.emitter.stream = stream + self.emitter.top_level_colon_align = tlca + if self.scalar_after_indicator is not None: + self.emitter.scalar_after_indicator = self.scalar_after_indicator + return self.serializer, self.representer, self.emitter + if self.Serializer is not None: + # cannot set 
serializer with CEmitter + self.Emitter = ruyaml.emitter.Emitter + self.emitter.stream = stream + self.emitter.top_level_colon_align = tlca + if self.scalar_after_indicator is not None: + self.emitter.scalar_after_indicator = self.scalar_after_indicator + return self.serializer, self.representer, self.emitter + # C routines + + rslvr = ( + ruyaml.resolver.BaseResolver + if 'base' in self.typ + else ruyaml.resolver.Resolver + ) + + class XDumper(CEmitter, self.Representer, rslvr): # type: ignore + def __init__( + selfx, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + CEmitter.__init__( + selfx, + stream, + canonical=canonical, + indent=indent, + width=width, + encoding=encoding, + allow_unicode=allow_unicode, + line_break=line_break, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + ) + selfx._emitter = selfx._serializer = selfx._representer = selfx + self.Representer.__init__( + selfx, + default_style=default_style, + default_flow_style=default_flow_style, + ) + rslvr.__init__(selfx) + + self._stream = stream + dumper = XDumper( + stream, + default_style=self.default_style, + default_flow_style=self.default_flow_style, + canonical=self.canonical, + indent=self.old_indent, + width=self.width, + allow_unicode=self.allow_unicode, + line_break=self.line_break, + explicit_start=self.explicit_start, + explicit_end=self.explicit_end, + version=self.version, + tags=self.tags, + ) + self._emitter = self._serializer = dumper + return dumper, dumper, dumper + + # basic types + def 
map(self, **kw): + # type: (Any) -> Any + if 'rt' in self.typ: + return CommentedMap(**kw) + else: + return dict(**kw) + + def seq(self, *args): + # type: (Any) -> Any + if 'rt' in self.typ: + return CommentedSeq(*args) + else: + return list(*args) + + # helpers + def official_plug_ins(self): + # type: () -> Any + """search for list of subdirs that are plug-ins, if __file__ is not available, e.g. + single file installers that are not properly emulating a file-system (issue 324) + no plug-ins will be found. If any are packaged, you know which file that are + and you can explicitly provide it during instantiation: + yaml = ruyaml.YAML(plug_ins=['ruyaml/jinja2/__plug_in__']) + """ + try: + bd = os.path.dirname(__file__) + except NameError: + return [] + gpbd = os.path.dirname(os.path.dirname(bd)) + res = [x.replace(gpbd, "")[1:-3] for x in glob.glob(bd + '/*/__plug_in__.py')] + return res + + def register_class(self, cls): + # type:(Any) -> Any + """ + register a class for dumping loading + - if it has attribute yaml_tag use that to register, else use class name + - if it has methods to_yaml/from_yaml use those to dump/load else dump attributes + as mapping + """ + tag = getattr(cls, 'yaml_tag', '!' 
+ cls.__name__) + try: + self.representer.add_representer(cls, cls.to_yaml) + except AttributeError: + + def t_y(representer, data): + # type: (Any, Any) -> Any + return representer.represent_yaml_object( + tag, data, cls, flow_style=representer.default_flow_style + ) + + self.representer.add_representer(cls, t_y) + try: + self.constructor.add_constructor(tag, cls.from_yaml) + except AttributeError: + + def f_y(constructor, node): + # type: (Any, Any) -> Any + return constructor.construct_yaml_object(node, cls) + + self.constructor.add_constructor(tag, f_y) + return cls + + # ### context manager + + def __enter__(self): + # type: () -> Any + self._context_manager = YAMLContextManager(self) + return self + + def __exit__(self, typ, value, traceback): + # type: (Any, Any, Any) -> None + if typ: + nprint('typ', typ) + self._context_manager.teardown_output() + # self._context_manager.teardown_input() + self._context_manager = None + + # ### backwards compatibility + def _indent(self, mapping=None, sequence=None, offset=None): + # type: (Any, Any, Any) -> None + if mapping is not None: + self.map_indent = mapping + if sequence is not None: + self.sequence_indent = sequence + if offset is not None: + self.sequence_dash_offset = offset + + @property + def indent(self): + # type: () -> Any + return self._indent + + @indent.setter + def indent(self, val): + # type: (Any) -> None + self.old_indent = val + + @property + def block_seq_indent(self): + # type: () -> Any + return self.sequence_dash_offset + + @block_seq_indent.setter + def block_seq_indent(self, val): + # type: (Any) -> None + self.sequence_dash_offset = val + + def compact(self, seq_seq=None, seq_map=None): + # type: (Any, Any) -> None + self.compact_seq_seq = seq_seq + self.compact_seq_map = seq_map + + +class YAMLContextManager: + def __init__(self, yaml, transform=None): + # type: (Any, Any) -> None # used to be: (Any, Optional[Callable]) -> None + self._yaml = yaml + self._output_inited = False + 
self._output_path = None + self._output = self._yaml._output + self._transform = transform + + # self._input_inited = False + # self._input = input + # self._input_path = None + # self._transform = yaml.transform + # self._fstream = None + + if not hasattr(self._output, 'write') and hasattr(self._output, 'open'): + # pathlib.Path() instance, open with the same mode + self._output_path = self._output + self._output = self._output_path.open('w') + + # if not hasattr(self._stream, 'write') and hasattr(stream, 'open'): + # if not hasattr(self._input, 'read') and hasattr(self._input, 'open'): + # # pathlib.Path() instance, open with the same mode + # self._input_path = self._input + # self._input = self._input_path.open('r') + + if self._transform is not None: + self._fstream = self._output + if self._yaml.encoding is None: + self._output = StringIO() + else: + self._output = BytesIO() + + def teardown_output(self): + # type: () -> None + if self._output_inited: + self._yaml.serializer.close() + else: + return + try: + self._yaml.emitter.dispose() + except AttributeError: + raise + # self.dumper.dispose() # cyaml + try: + delattr(self._yaml, '_serializer') + delattr(self._yaml, '_emitter') + except AttributeError: + raise + if self._transform: + val = self._output.getvalue() + if self._yaml.encoding: + val = val.decode(self._yaml.encoding) + if self._fstream is None: + self._transform(val) + else: + self._fstream.write(self._transform(val)) + self._fstream.flush() + self._output = self._fstream # maybe not necessary + if self._output_path is not None: + self._output.close() + + def init_output(self, first_data): + # type: (Any) -> None + if self._yaml.top_level_colon_align is True: + tlca = max([len(str(x)) for x in first_data]) # type: Any + else: + tlca = self._yaml.top_level_colon_align + self._yaml.get_serializer_representer_emitter(self._output, tlca) + self._yaml.serializer.open() + self._output_inited = True + + def dump(self, data): + # type: (Any) -> None + if 
not self._output_inited: + self.init_output(data) + try: + self._yaml.representer.represent(data) + except AttributeError: + # nprint(dir(dumper._representer)) + raise + + # def teardown_input(self): + # pass + # + # def init_input(self): + # # set the constructor and parser on YAML() instance + # self._yaml.get_constructor_parser(stream) + # + # def load(self): + # if not self._input_inited: + # self.init_input() + # try: + # while self._yaml.constructor.check_data(): + # yield self._yaml.constructor.get_data() + # finally: + # parser.dispose() + # try: + # self._reader.reset_reader() # type: ignore + # except AttributeError: + # pass + # try: + # self._scanner.reset_scanner() # type: ignore + # except AttributeError: + # pass + + +def yaml_object(yml): + # type: (Any) -> Any + """decorator for classes that needs to dump/load objects + The tag for such objects is taken from the class attribute yaml_tag (or the + class name in lowercase in case unavailable) + If methods to_yaml and/or from_yaml are available, these are called for dumping resp. + loading, default routines (dumping a mapping of the attributes) used otherwise. + """ + + def yo_deco(cls): + # type: (Any) -> Any + tag = getattr(cls, 'yaml_tag', '!' 
+ cls.__name__) + try: + yml.representer.add_representer(cls, cls.to_yaml) + except AttributeError: + + def t_y(representer, data): + # type: (Any, Any) -> Any + return representer.represent_yaml_object( + tag, data, cls, flow_style=representer.default_flow_style + ) + + yml.representer.add_representer(cls, t_y) + try: + yml.constructor.add_constructor(tag, cls.from_yaml) + except AttributeError: + + def f_y(constructor, node): + # type: (Any, Any) -> Any + return constructor.construct_yaml_object(node, cls) + + yml.constructor.add_constructor(tag, f_y) + return cls + + return yo_deco + + +######################################################################################## +def warn_deprecation(fun, method, arg=''): + # type: (Any, Any, str) -> None + from ruyaml.compat import _F + + warnings.warn( + _F( + '\n{fun} will be removed, use\n\n yaml=YAML({arg})\n yaml.{method}(...)\n\ninstead', # NOQA + fun=fun, + method=method, + arg=arg, + ), + PendingDeprecationWarning, # this will show when testing with pytest/tox + stacklevel=3, + ) + + +######################################################################################## + + +def scan(stream, Loader=Loader): + # type: (StreamTextType, Any) -> Any + """ + Scan a YAML stream and produce scanning tokens. + """ + warn_deprecation('scan', 'scan', arg="typ='unsafe', pure=True") + loader = Loader(stream) + try: + while loader.scanner.check_token(): + yield loader.scanner.get_token() + finally: + loader._parser.dispose() + + +def parse(stream, Loader=Loader): + # type: (StreamTextType, Any) -> Any + """ + Parse a YAML stream and produce parsing events. 
+ """ + warn_deprecation('parse', 'parse', arg="typ='unsafe', pure=True") + loader = Loader(stream) + try: + while loader._parser.check_event(): + yield loader._parser.get_event() + finally: + loader._parser.dispose() + + +def compose(stream, Loader=Loader): + # type: (StreamTextType, Any) -> Any + """ + Parse the first YAML document in a stream + and produce the corresponding representation tree. + """ + warn_deprecation('compose', 'compose', arg="typ='unsafe', pure=True") + loader = Loader(stream) + try: + return loader.get_single_node() + finally: + loader.dispose() + + +def compose_all(stream, Loader=Loader): + # type: (StreamTextType, Any) -> Any + """ + Parse all YAML documents in a stream + and produce corresponding representation trees. + """ + warn_deprecation('compose', 'compose', arg="typ='unsafe', pure=True") + loader = Loader(stream) + try: + while loader.check_node(): + yield loader._composer.get_node() + finally: + loader._parser.dispose() + + +def load(stream, Loader=None, version=None, preserve_quotes=None): + # type: (Any, Any, Any, Any) -> Any + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + """ + warn_deprecation('load', 'load', arg="typ='unsafe', pure=True") + if Loader is None: + warnings.warn(UnsafeLoaderWarning.text, UnsafeLoaderWarning, stacklevel=2) + Loader = UnsafeLoader + loader = Loader(stream, version, preserve_quotes=preserve_quotes) # type: Any + try: + return loader._constructor.get_single_data() # type: ignore + finally: + loader._parser.dispose() # type: ignore + try: + loader._reader.reset_reader() # type: ignore + except AttributeError: + pass + try: + loader._scanner.reset_scanner() # type: ignore + except AttributeError: + pass + + +def load_all(stream, Loader=None, version=None, preserve_quotes=None): + # type: (Any, Any, Any, Any) -> Any # NOQA + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. 
+ """ + warn_deprecation('load_all', 'load_all', arg="typ='unsafe', pure=True") + if Loader is None: + warnings.warn(UnsafeLoaderWarning.text, UnsafeLoaderWarning, stacklevel=2) + Loader = UnsafeLoader + loader = Loader(stream, version, preserve_quotes=preserve_quotes) # type: Any + try: + while loader._constructor.check_data(): # type: ignore + yield loader._constructor.get_data() # type: ignore + finally: + loader._parser.dispose() # type: ignore + try: + loader._reader.reset_reader() # type: ignore + except AttributeError: + pass + try: + loader._scanner.reset_scanner() # type: ignore + except AttributeError: + pass + + +def safe_load(stream, version=None): + # type: (StreamTextType, Optional[VersionType]) -> Any + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + Resolve only basic YAML tags. + """ + warn_deprecation('safe_load', 'load', arg="typ='safe', pure=True") + return load(stream, SafeLoader, version) + + +def safe_load_all(stream, version=None): + # type: (StreamTextType, Optional[VersionType]) -> Any + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + Resolve only basic YAML tags. + """ + warn_deprecation('safe_load_all', 'load_all', arg="typ='safe', pure=True") + return load_all(stream, SafeLoader, version) + + +def round_trip_load(stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> Any + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + Resolve only basic YAML tags. + """ + warn_deprecation('round_trip_load_all', 'load') + return load(stream, RoundTripLoader, version, preserve_quotes=preserve_quotes) + + +def round_trip_load_all(stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> Any + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. 
+ Resolve only basic YAML tags. + """ + warn_deprecation('round_trip_load_all', 'load_all') + return load_all(stream, RoundTripLoader, version, preserve_quotes=preserve_quotes) + + +def emit( + events, + stream=None, + Dumper=Dumper, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, +): + # type: (Any, Optional[StreamType], Any, Optional[bool], Union[int, None], Optional[int], Optional[bool], Any) -> Any # NOQA + """ + Emit YAML parsing events into a stream. + If stream is None, return the produced string instead. + """ + warn_deprecation('emit', 'emit', arg="typ='safe', pure=True") + getvalue = None + if stream is None: + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper( + stream, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + ) + try: + for event in events: + dumper.emit(event) + finally: + try: + dumper._emitter.dispose() + except AttributeError: + raise + dumper.dispose() # cyaml + if getvalue is not None: + return getvalue() + + +enc = None + + +def serialize_all( + nodes, + stream=None, + Dumper=Dumper, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=enc, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, +): + # type: (Any, Optional[StreamType], Any, Any, Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any) -> Any # NOQA + """ + Serialize a sequence of representation trees into a YAML stream. + If stream is None, return the produced string instead. 
+ """ + warn_deprecation('serialize_all', 'serialize_all', arg="typ='safe', pure=True") + getvalue = None + if stream is None: + if encoding is None: + stream = StringIO() + else: + stream = BytesIO() + getvalue = stream.getvalue + dumper = Dumper( + stream, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + encoding=encoding, + version=version, + tags=tags, + explicit_start=explicit_start, + explicit_end=explicit_end, + ) + try: + dumper._serializer.open() + for node in nodes: + dumper.serialize(node) + dumper._serializer.close() + finally: + try: + dumper._emitter.dispose() + except AttributeError: + raise + dumper.dispose() # cyaml + if getvalue is not None: + return getvalue() + + +def serialize(node, stream=None, Dumper=Dumper, **kwds): + # type: (Any, Optional[StreamType], Any, Any) -> Any + """ + Serialize a representation tree into a YAML stream. + If stream is None, return the produced string instead. + """ + warn_deprecation('serialize', 'serialize', arg="typ='safe', pure=True") + return serialize_all([node], stream, Dumper=Dumper, **kwds) + + +def dump_all( + documents, + stream=None, + Dumper=Dumper, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=enc, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, +): + # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> Any # NOQA + """ + Serialize a sequence of Python objects into a YAML stream. + If stream is None, return the produced string instead. 
+ """ + warn_deprecation('dump_all', 'dump_all', arg="typ='unsafe', pure=True") + getvalue = None + if top_level_colon_align is True: + top_level_colon_align = max([len(str(x)) for x in documents[0]]) + if stream is None: + if encoding is None: + stream = StringIO() + else: + stream = BytesIO() + getvalue = stream.getvalue + dumper = Dumper( + stream, + default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + block_seq_indent=block_seq_indent, + top_level_colon_align=top_level_colon_align, + prefix_colon=prefix_colon, + ) + try: + dumper._serializer.open() + for data in documents: + try: + dumper._representer.represent(data) + except AttributeError: + # nprint(dir(dumper._representer)) + raise + dumper._serializer.close() + finally: + try: + dumper._emitter.dispose() + except AttributeError: + raise + dumper.dispose() # cyaml + if getvalue is not None: + return getvalue() # type: ignore + return None + + +def dump( + data, + stream=None, + Dumper=Dumper, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=enc, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, +): + # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any) -> Optional[Any] # NOQA + """ + Serialize a Python object into a YAML stream. + If stream is None, return the produced string instead. 
+ + default_style ∈ None, '', '"', "'", '|', '>' + + """ + warn_deprecation('dump', 'dump', arg="typ='unsafe', pure=True") + return dump_all( + [data], + stream, + Dumper=Dumper, + default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + block_seq_indent=block_seq_indent, + ) + + +def safe_dump_all(documents, stream=None, **kwds): + # type: (Any, Optional[StreamType], Any) -> Optional[Any] + """ + Serialize a sequence of Python objects into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + warn_deprecation('safe_dump_all', 'dump_all', arg="typ='safe', pure=True") + return dump_all(documents, stream, Dumper=SafeDumper, **kwds) + + +def safe_dump(data, stream=None, **kwds): + # type: (Any, Optional[StreamType], Any) -> Optional[Any] + """ + Serialize a Python object into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. 
+ """ + warn_deprecation('safe_dump', 'dump', arg="typ='safe', pure=True") + return dump_all([data], stream, Dumper=SafeDumper, **kwds) + + +def round_trip_dump( + data, + stream=None, + Dumper=RoundTripDumper, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=enc, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, +): + # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any, Any, Any) -> Optional[Any] # NOQA + allow_unicode = True if allow_unicode is None else allow_unicode + warn_deprecation('round_trip_dump', 'dump') + return dump_all( + [data], + stream, + Dumper=Dumper, + default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + block_seq_indent=block_seq_indent, + top_level_colon_align=top_level_colon_align, + prefix_colon=prefix_colon, + ) + + +# Loader/Dumper are no longer composites, to get to the associated +# Resolver()/Representer(), etc., you need to instantiate the class + + +def add_implicit_resolver( + tag, regexp, first=None, Loader=None, Dumper=None, resolver=Resolver +): + # type: (Any, Any, Any, Any, Any, Any) -> None + """ + Add an implicit scalar detector. + If an implicit scalar value matches the given regexp, + the corresponding tag is assigned to the scalar. + first is a sequence of possible initial characters or None. 
+ """ + if Loader is None and Dumper is None: + resolver.add_implicit_resolver(tag, regexp, first) + return + if Loader: + if hasattr(Loader, 'add_implicit_resolver'): + Loader.add_implicit_resolver(tag, regexp, first) + elif issubclass( + Loader, (BaseLoader, SafeLoader, ruyaml.loader.Loader, RoundTripLoader) + ): + Resolver.add_implicit_resolver(tag, regexp, first) + else: + raise NotImplementedError + if Dumper: + if hasattr(Dumper, 'add_implicit_resolver'): + Dumper.add_implicit_resolver(tag, regexp, first) + elif issubclass( + Dumper, (BaseDumper, SafeDumper, ruyaml.dumper.Dumper, RoundTripDumper) + ): + Resolver.add_implicit_resolver(tag, regexp, first) + else: + raise NotImplementedError + + +# this code currently not tested +def add_path_resolver( + tag, path, kind=None, Loader=None, Dumper=None, resolver=Resolver +): + # type: (Any, Any, Any, Any, Any, Any) -> None + """ + Add a path based resolver for the given tag. + A path is a list of keys that forms a path + to a node in the representation tree. + Keys can be string values, integers, or None. + """ + if Loader is None and Dumper is None: + resolver.add_path_resolver(tag, path, kind) + return + if Loader: + if hasattr(Loader, 'add_path_resolver'): + Loader.add_path_resolver(tag, path, kind) + elif issubclass( + Loader, (BaseLoader, SafeLoader, ruyaml.loader.Loader, RoundTripLoader) + ): + Resolver.add_path_resolver(tag, path, kind) + else: + raise NotImplementedError + if Dumper: + if hasattr(Dumper, 'add_path_resolver'): + Dumper.add_path_resolver(tag, path, kind) + elif issubclass( + Dumper, (BaseDumper, SafeDumper, ruyaml.dumper.Dumper, RoundTripDumper) + ): + Resolver.add_path_resolver(tag, path, kind) + else: + raise NotImplementedError + + +def add_constructor(tag, object_constructor, Loader=None, constructor=Constructor): + # type: (Any, Any, Any, Any) -> None + """ + Add an object constructor for the given tag. 
+ object_onstructor is a function that accepts a Loader instance + and a node object and produces the corresponding Python object. + """ + if Loader is None: + constructor.add_constructor(tag, object_constructor) + else: + if hasattr(Loader, 'add_constructor'): + Loader.add_constructor(tag, object_constructor) + return + if issubclass(Loader, BaseLoader): + BaseConstructor.add_constructor(tag, object_constructor) + elif issubclass(Loader, SafeLoader): + SafeConstructor.add_constructor(tag, object_constructor) + elif issubclass(Loader, Loader): + Constructor.add_constructor(tag, object_constructor) + elif issubclass(Loader, RoundTripLoader): + RoundTripConstructor.add_constructor(tag, object_constructor) + else: + raise NotImplementedError + + +def add_multi_constructor( + tag_prefix, multi_constructor, Loader=None, constructor=Constructor +): + # type: (Any, Any, Any, Any) -> None + """ + Add a multi-constructor for the given tag prefix. + Multi-constructor is called for a node if its tag starts with tag_prefix. + Multi-constructor accepts a Loader instance, a tag suffix, + and a node object and produces the corresponding Python object. 
+ """ + if Loader is None: + constructor.add_multi_constructor(tag_prefix, multi_constructor) + else: + if False and hasattr(Loader, 'add_multi_constructor'): + Loader.add_multi_constructor(tag_prefix, constructor) + return + if issubclass(Loader, BaseLoader): + BaseConstructor.add_multi_constructor(tag_prefix, multi_constructor) + elif issubclass(Loader, SafeLoader): + SafeConstructor.add_multi_constructor(tag_prefix, multi_constructor) + elif issubclass(Loader, ruyaml.loader.Loader): + Constructor.add_multi_constructor(tag_prefix, multi_constructor) + elif issubclass(Loader, RoundTripLoader): + RoundTripConstructor.add_multi_constructor(tag_prefix, multi_constructor) + else: + raise NotImplementedError + + +def add_representer( + data_type, object_representer, Dumper=None, representer=Representer +): + # type: (Any, Any, Any, Any) -> None + """ + Add a representer for the given type. + object_representer is a function accepting a Dumper instance + and an instance of the given data type + and producing the corresponding representation node. + """ + if Dumper is None: + representer.add_representer(data_type, object_representer) + else: + if hasattr(Dumper, 'add_representer'): + Dumper.add_representer(data_type, object_representer) + return + if issubclass(Dumper, BaseDumper): + BaseRepresenter.add_representer(data_type, object_representer) + elif issubclass(Dumper, SafeDumper): + SafeRepresenter.add_representer(data_type, object_representer) + elif issubclass(Dumper, Dumper): + Representer.add_representer(data_type, object_representer) + elif issubclass(Dumper, RoundTripDumper): + RoundTripRepresenter.add_representer(data_type, object_representer) + else: + raise NotImplementedError + + +# this code currently not tested +def add_multi_representer( + data_type, multi_representer, Dumper=None, representer=Representer +): + # type: (Any, Any, Any, Any) -> None + """ + Add a representer for the given type. 
+ multi_representer is a function accepting a Dumper instance + and an instance of the given data type or subtype + and producing the corresponding representation node. + """ + if Dumper is None: + representer.add_multi_representer(data_type, multi_representer) + else: + if hasattr(Dumper, 'add_multi_representer'): + Dumper.add_multi_representer(data_type, multi_representer) + return + if issubclass(Dumper, BaseDumper): + BaseRepresenter.add_multi_representer(data_type, multi_representer) + elif issubclass(Dumper, SafeDumper): + SafeRepresenter.add_multi_representer(data_type, multi_representer) + elif issubclass(Dumper, Dumper): + Representer.add_multi_representer(data_type, multi_representer) + elif issubclass(Dumper, RoundTripDumper): + RoundTripRepresenter.add_multi_representer(data_type, multi_representer) + else: + raise NotImplementedError + + +class YAMLObjectMetaclass(type): + """ + The metaclass for YAMLObject. + """ + + def __init__(cls, name, bases, kwds): + # type: (Any, Any, Any) -> None + super().__init__(name, bases, kwds) + if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None: + cls.yaml_constructor.add_constructor(cls.yaml_tag, cls.from_yaml) # type: ignore + cls.yaml_representer.add_representer(cls, cls.to_yaml) # type: ignore + + +class YAMLObject(metaclass=YAMLObjectMetaclass): # type: ignore + """ + An object that can dump itself to a YAML stream + and load itself from a YAML stream. + """ + + __slots__ = () # no direct instantiation, so allow immutable subclasses + + yaml_constructor = Constructor + yaml_representer = Representer + + yaml_tag = None # type: Any + yaml_flow_style = None # type: Any + + @classmethod + def from_yaml(cls, constructor, node): + # type: (Any, Any) -> Any + """ + Convert a representation node to a Python object. + """ + return constructor.construct_yaml_object(node, cls) + + @classmethod + def to_yaml(cls, representer, data): + # type: (Any, Any) -> Any + """ + Convert a Python object to a representation node. 
+ """ + return representer.represent_yaml_object( + cls.yaml_tag, data, cls, flow_style=cls.yaml_flow_style + ) diff --git a/lib/ruyaml/nodes.py b/lib/ruyaml/nodes.py new file mode 100644 index 0000000..e9c0188 --- /dev/null +++ b/lib/ruyaml/nodes.py @@ -0,0 +1,146 @@ +# coding: utf-8 + +import sys + +from ruyaml.compat import _F + +if False: # MYPY + from typing import Any, Dict, Text # NOQA + + +class Node: + __slots__ = 'tag', 'value', 'start_mark', 'end_mark', 'comment', 'anchor' + + def __init__(self, tag, value, start_mark, end_mark, comment=None, anchor=None): + # type: (Any, Any, Any, Any, Any, Any) -> None + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.comment = comment + self.anchor = anchor + + def __repr__(self): + # type: () -> Any + value = self.value + # if isinstance(value, list): + # if len(value) == 0: + # value = '' + # elif len(value) == 1: + # value = '<1 item>' + # else: + # value = f'<{len(value)} items>' + # else: + # if len(value) > 75: + # value = repr(value[:70]+' ... ') + # else: + # value = repr(value) + value = repr(value) + return _F( + '{class_name!s}(tag={self_tag!r}, value={value!s})', + class_name=self.__class__.__name__, + self_tag=self.tag, + value=value, + ) + + def dump(self, indent=0): + # type: (int) -> None + if isinstance(self.value, str): + sys.stdout.write( + '{}{}(tag={!r}, value={!r})\n'.format( + ' ' * indent, self.__class__.__name__, self.tag, self.value + ) + ) + if self.comment: + sys.stdout.write( + ' {}comment: {})\n'.format(' ' * indent, self.comment) + ) + return + sys.stdout.write( + '{}{}(tag={!r})\n'.format(' ' * indent, self.__class__.__name__, self.tag) + ) + if self.comment: + sys.stdout.write(' {}comment: {})\n'.format(' ' * indent, self.comment)) + for v in self.value: + if isinstance(v, tuple): + for v1 in v: + v1.dump(indent + 1) + elif isinstance(v, Node): + v.dump(indent + 1) + else: + sys.stdout.write('Node value type? 
{}\n'.format(type(v))) + + +class ScalarNode(Node): + """ + styles: + ? -> set() ? key, no value + " -> double quoted + ' -> single quoted + | -> literal style + > -> folding style + """ + + __slots__ = ('style',) + id = 'scalar' + + def __init__( + self, + tag, + value, + start_mark=None, + end_mark=None, + style=None, + comment=None, + anchor=None, + ): + # type: (Any, Any, Any, Any, Any, Any, Any) -> None + Node.__init__( + self, tag, value, start_mark, end_mark, comment=comment, anchor=anchor + ) + self.style = style + + +class CollectionNode(Node): + __slots__ = ('flow_style',) + + def __init__( + self, + tag, + value, + start_mark=None, + end_mark=None, + flow_style=None, + comment=None, + anchor=None, + ): + # type: (Any, Any, Any, Any, Any, Any, Any) -> None + Node.__init__(self, tag, value, start_mark, end_mark, comment=comment) + self.flow_style = flow_style + self.anchor = anchor + + +class SequenceNode(CollectionNode): + __slots__ = () + id = 'sequence' + + +class MappingNode(CollectionNode): + __slots__ = ('merge',) + id = 'mapping' + + def __init__( + self, + tag, + value, + start_mark=None, + end_mark=None, + flow_style=None, + comment=None, + anchor=None, + ): + # type: (Any, Any, Any, Any, Any, Any, Any) -> None + CollectionNode.__init__( + self, tag, value, start_mark, end_mark, flow_style, comment, anchor + ) + self.merge = None diff --git a/lib/ruyaml/parser.py b/lib/ruyaml/parser.py new file mode 100644 index 0000000..f17331b --- /dev/null +++ b/lib/ruyaml/parser.py @@ -0,0 +1,938 @@ +# coding: utf-8 + +# The following YAML grammar is LL(1) and is parsed by a recursive descent +# parser. +# +# stream ::= STREAM-START implicit_document? explicit_document* +# STREAM-END +# implicit_document ::= block_node DOCUMENT-END* +# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +# block_node_or_indentless_sequence ::= +# ALIAS +# | properties (block_content | +# indentless_block_sequence)? 
+# | block_content +# | indentless_block_sequence +# block_node ::= ALIAS +# | properties block_content? +# | block_content +# flow_node ::= ALIAS +# | properties flow_content? +# | flow_content +# properties ::= TAG ANCHOR? | ANCHOR TAG? +# block_content ::= block_collection | flow_collection | SCALAR +# flow_content ::= flow_collection | SCALAR +# block_collection ::= block_sequence | block_mapping +# flow_collection ::= flow_sequence | flow_mapping +# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* +# BLOCK-END +# indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +# block_mapping ::= BLOCK-MAPPING_START +# ((KEY block_node_or_indentless_sequence?)? +# (VALUE block_node_or_indentless_sequence?)?)* +# BLOCK-END +# flow_sequence ::= FLOW-SEQUENCE-START +# (flow_sequence_entry FLOW-ENTRY)* +# flow_sequence_entry? +# FLOW-SEQUENCE-END +# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# flow_mapping ::= FLOW-MAPPING-START +# (flow_mapping_entry FLOW-ENTRY)* +# flow_mapping_entry? +# FLOW-MAPPING-END +# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+# +# FIRST sets: +# +# stream: { STREAM-START <} +# explicit_document: { DIRECTIVE DOCUMENT-START } +# implicit_document: FIRST(block_node) +# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START +# BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START +# FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_sequence: { BLOCK-SEQUENCE-START } +# block_mapping: { BLOCK-MAPPING-START } +# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR +# BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START +# FLOW-MAPPING-START BLOCK-ENTRY } +# indentless_sequence: { ENTRY } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_sequence: { FLOW-SEQUENCE-START } +# flow_mapping: { FLOW-MAPPING-START } +# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START +# FLOW-MAPPING-START KEY } +# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START +# FLOW-MAPPING-START KEY } + +# need to have full path with import, as pkg_resources tries to load parser.py in __init__.py +# only to not do anything with the package afterwards +# and for Jython too + + +from ruyaml.comments import C_POST, C_PRE, C_SPLIT_ON_FIRST_BLANK +from ruyaml.compat import _F, nprint, nprintf # NOQA +from ruyaml.error import MarkedYAMLError +from ruyaml.events import * # NOQA +from ruyaml.scanner import ( # NOQA + BlankLineComment, + RoundTripScanner, + Scanner, + ScannerError, +) +from ruyaml.tokens import * # NOQA + +if False: # MYPY + from typing import Any, Dict, List, Optional # NOQA + +__all__ = ['Parser', 'RoundTripParser', 'ParserError'] + + +def xprintf(*args, **kw): + # type: (Any, Any) -> Any + return 
nprintf(*args, **kw) + pass + + +class ParserError(MarkedYAMLError): + pass + + +class Parser: + # Since writing a recursive-descendant parser is a straightforward task, we + # do not give many comments here. + + DEFAULT_TAGS = {'!': '!', '!!': 'tag:yaml.org,2002:'} + + def __init__(self, loader): + # type: (Any) -> None + self.loader = loader + if self.loader is not None and getattr(self.loader, '_parser', None) is None: + self.loader._parser = self + self.reset_parser() + + def reset_parser(self): + # type: () -> None + # Reset the state attributes (to clear self-references) + self.current_event = self.last_event = None + self.tag_handles = {} # type: Dict[Any, Any] + self.states = [] # type: List[Any] + self.marks = [] # type: List[Any] + self.state = self.parse_stream_start # type: Any + + def dispose(self): + # type: () -> None + self.reset_parser() + + @property + def scanner(self): + # type: () -> Any + if hasattr(self.loader, 'typ'): + return self.loader.scanner + return self.loader._scanner + + @property + def resolver(self): + # type: () -> Any + if hasattr(self.loader, 'typ'): + return self.loader.resolver + return self.loader._resolver + + def check_event(self, *choices): + # type: (Any) -> bool + # Check the type of the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + if self.current_event is not None: + if not choices: + return True + for choice in choices: + if isinstance(self.current_event, choice): + return True + return False + + def peek_event(self): + # type: () -> Any + # Get the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + return self.current_event + + def get_event(self): + # type: () -> Any + # Get the next event and proceed further. 
+ if self.current_event is None: + if self.state: + self.current_event = self.state() + # assert self.current_event is not None + # if self.current_event.end_mark.line != self.peek_event().start_mark.line: + xprintf( + 'get_event', repr(self.current_event), self.peek_event().start_mark.line + ) + self.last_event = value = self.current_event + self.current_event = None + return value + + # stream ::= STREAM-START implicit_document? explicit_document* + # STREAM-END + # implicit_document ::= block_node DOCUMENT-END* + # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* + + def parse_stream_start(self): + # type: () -> Any + # Parse the stream start. + token = self.scanner.get_token() + self.move_token_comment(token) + event = StreamStartEvent( + token.start_mark, token.end_mark, encoding=token.encoding + ) + + # Prepare the next state. + self.state = self.parse_implicit_document_start + + return event + + def parse_implicit_document_start(self): + # type: () -> Any + # Parse an implicit document. + if not self.scanner.check_token( + DirectiveToken, DocumentStartToken, StreamEndToken + ): + self.tag_handles = self.DEFAULT_TAGS + token = self.scanner.peek_token() + start_mark = end_mark = token.start_mark + event = DocumentStartEvent(start_mark, end_mark, explicit=False) + + # Prepare the next state. + self.states.append(self.parse_document_end) + self.state = self.parse_block_node + + return event + + else: + return self.parse_document_start() + + def parse_document_start(self): + # type: () -> Any + # Parse any extra document end indicators. + while self.scanner.check_token(DocumentEndToken): + self.scanner.get_token() + # Parse an explicit document. 
+ if not self.scanner.check_token(StreamEndToken): + version, tags = self.process_directives() + if not self.scanner.check_token(DocumentStartToken): + raise ParserError( + None, + None, + _F( + "expected '<document start>', but found {pt!r}", + pt=self.scanner.peek_token().id, + ), + self.scanner.peek_token().start_mark, + ) + token = self.scanner.get_token() + start_mark = token.start_mark + end_mark = token.end_mark + # if self.loader is not None and \ + # end_mark.line != self.scanner.peek_token().start_mark.line: + # self.loader.scalar_after_indicator = False + event = DocumentStartEvent( + start_mark, end_mark, explicit=True, version=version, tags=tags + ) # type: Any + self.states.append(self.parse_document_end) + self.state = self.parse_document_content + else: + # Parse the end of the stream. + token = self.scanner.get_token() + event = StreamEndEvent( + token.start_mark, token.end_mark, comment=token.comment + ) + assert not self.states + assert not self.marks + self.state = None + return event + + def parse_document_end(self): + # type: () -> Any + # Parse the document end. + token = self.scanner.peek_token() + start_mark = end_mark = token.start_mark + explicit = False + if self.scanner.check_token(DocumentEndToken): + token = self.scanner.get_token() + end_mark = token.end_mark + explicit = True + event = DocumentEndEvent(start_mark, end_mark, explicit=explicit) + + # Prepare the next state. 
+ if self.resolver.processing_version == (1, 1): + self.state = self.parse_document_start + else: + self.state = self.parse_implicit_document_start + + return event + + def parse_document_content(self): + # type: () -> Any + if self.scanner.check_token( + DirectiveToken, DocumentStartToken, DocumentEndToken, StreamEndToken + ): + event = self.process_empty_scalar(self.scanner.peek_token().start_mark) + self.state = self.states.pop() + return event + else: + return self.parse_block_node() + + def process_directives(self): + # type: () -> Any + yaml_version = None + self.tag_handles = {} + while self.scanner.check_token(DirectiveToken): + token = self.scanner.get_token() + if token.name == 'YAML': + if yaml_version is not None: + raise ParserError( + None, None, 'found duplicate YAML directive', token.start_mark + ) + major, minor = token.value + if major != 1: + raise ParserError( + None, + None, + 'found incompatible YAML document (version 1.* is required)', + token.start_mark, + ) + yaml_version = token.value + elif token.name == 'TAG': + handle, prefix = token.value + if handle in self.tag_handles: + raise ParserError( + None, + None, + _F('duplicate tag handle {handle!r}', handle=handle), + token.start_mark, + ) + self.tag_handles[handle] = prefix + if bool(self.tag_handles): + value = yaml_version, self.tag_handles.copy() # type: Any + else: + value = yaml_version, None + if self.loader is not None and hasattr(self.loader, 'tags'): + self.loader.version = yaml_version + if self.loader.tags is None: + self.loader.tags = {} + for k in self.tag_handles: + self.loader.tags[k] = self.tag_handles[k] + for key in self.DEFAULT_TAGS: + if key not in self.tag_handles: + self.tag_handles[key] = self.DEFAULT_TAGS[key] + return value + + # block_node_or_indentless_sequence ::= ALIAS + # | properties (block_content | indentless_block_sequence)? + # | block_content + # | indentless_block_sequence + # block_node ::= ALIAS + # | properties block_content? 
+ # | block_content + # flow_node ::= ALIAS + # | properties flow_content? + # | flow_content + # properties ::= TAG ANCHOR? | ANCHOR TAG? + # block_content ::= block_collection | flow_collection | SCALAR + # flow_content ::= flow_collection | SCALAR + # block_collection ::= block_sequence | block_mapping + # flow_collection ::= flow_sequence | flow_mapping + + def parse_block_node(self): + # type: () -> Any + return self.parse_node(block=True) + + def parse_flow_node(self): + # type: () -> Any + return self.parse_node() + + def parse_block_node_or_indentless_sequence(self): + # type: () -> Any + return self.parse_node(block=True, indentless_sequence=True) + + def transform_tag(self, handle, suffix): + # type: (Any, Any) -> Any + return self.tag_handles[handle] + suffix + + def parse_node(self, block=False, indentless_sequence=False): + # type: (bool, bool) -> Any + if self.scanner.check_token(AliasToken): + token = self.scanner.get_token() + event = AliasEvent( + token.value, token.start_mark, token.end_mark + ) # type: Any + self.state = self.states.pop() + return event + + anchor = None + tag = None + start_mark = end_mark = tag_mark = None + if self.scanner.check_token(AnchorToken): + token = self.scanner.get_token() + self.move_token_comment(token) + start_mark = token.start_mark + end_mark = token.end_mark + anchor = token.value + if self.scanner.check_token(TagToken): + token = self.scanner.get_token() + tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + elif self.scanner.check_token(TagToken): + token = self.scanner.get_token() + start_mark = tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + if self.scanner.check_token(AnchorToken): + token = self.scanner.get_token() + start_mark = tag_mark = token.start_mark + end_mark = token.end_mark + anchor = token.value + if tag is not None: + handle, suffix = tag + if handle is not None: + if handle not in self.tag_handles: + raise ParserError( + 'while parsing a 
node', + start_mark, + _F('found undefined tag handle {handle!r}', handle=handle), + tag_mark, + ) + tag = self.transform_tag(handle, suffix) + else: + tag = suffix + # if tag == '!': + # raise ParserError("while parsing a node", start_mark, + # "found non-specific tag '!'", tag_mark, + # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' + # and share your opinion.") + if start_mark is None: + start_mark = end_mark = self.scanner.peek_token().start_mark + event = None + implicit = tag is None or tag == '!' + if indentless_sequence and self.scanner.check_token(BlockEntryToken): + comment = None + pt = self.scanner.peek_token() + if self.loader and self.loader.comment_handling is None: + if pt.comment and pt.comment[0]: + comment = [pt.comment[0], []] + pt.comment[0] = None + elif self.loader: + if pt.comment: + comment = pt.comment + end_mark = self.scanner.peek_token().end_mark + event = SequenceStartEvent( + anchor, + tag, + implicit, + start_mark, + end_mark, + flow_style=False, + comment=comment, + ) + self.state = self.parse_indentless_sequence_entry + return event + + if self.scanner.check_token(ScalarToken): + token = self.scanner.get_token() + # self.scanner.peek_token_same_line_comment(token) + end_mark = token.end_mark + if (token.plain and tag is None) or tag == '!': + implicit = (True, False) + elif tag is None: + implicit = (False, True) + else: + implicit = (False, False) + # nprint('se', token.value, token.comment) + event = ScalarEvent( + anchor, + tag, + implicit, + token.value, + start_mark, + end_mark, + style=token.style, + comment=token.comment, + ) + self.state = self.states.pop() + elif self.scanner.check_token(FlowSequenceStartToken): + pt = self.scanner.peek_token() + end_mark = pt.end_mark + event = SequenceStartEvent( + anchor, + tag, + implicit, + start_mark, + end_mark, + flow_style=True, + comment=pt.comment, + ) + self.state = self.parse_flow_sequence_first_entry + elif self.scanner.check_token(FlowMappingStartToken): + pt = 
self.scanner.peek_token() + end_mark = pt.end_mark + event = MappingStartEvent( + anchor, + tag, + implicit, + start_mark, + end_mark, + flow_style=True, + comment=pt.comment, + ) + self.state = self.parse_flow_mapping_first_key + elif block and self.scanner.check_token(BlockSequenceStartToken): + end_mark = self.scanner.peek_token().start_mark + # should inserting the comment be dependent on the + # indentation? + pt = self.scanner.peek_token() + comment = pt.comment + # nprint('pt0', type(pt)) + if comment is None or comment[1] is None: + comment = pt.split_old_comment() + # nprint('pt1', comment) + event = SequenceStartEvent( + anchor, + tag, + implicit, + start_mark, + end_mark, + flow_style=False, + comment=comment, + ) + self.state = self.parse_block_sequence_first_entry + elif block and self.scanner.check_token(BlockMappingStartToken): + end_mark = self.scanner.peek_token().start_mark + comment = self.scanner.peek_token().comment + event = MappingStartEvent( + anchor, + tag, + implicit, + start_mark, + end_mark, + flow_style=False, + comment=comment, + ) + self.state = self.parse_block_mapping_first_key + elif anchor is not None or tag is not None: + # Empty scalars are allowed even if a tag or an anchor is + # specified. 
+ event = ScalarEvent( + anchor, tag, (implicit, False), "", start_mark, end_mark + ) + self.state = self.states.pop() + else: + if block: + node = 'block' + else: + node = 'flow' + token = self.scanner.peek_token() + raise ParserError( + _F('while parsing a {node!s} node', node=node), + start_mark, + _F( + 'expected the node content, but found {token_id!r}', + token_id=token.id, + ), + token.start_mark, + ) + return event + + # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* + # BLOCK-END + + def parse_block_sequence_first_entry(self): + # type: () -> Any + token = self.scanner.get_token() + # move any comment from start token + # self.move_token_comment(token) + self.marks.append(token.start_mark) + return self.parse_block_sequence_entry() + + def parse_block_sequence_entry(self): + # type: () -> Any + if self.scanner.check_token(BlockEntryToken): + token = self.scanner.get_token() + self.move_token_comment(token) + if not self.scanner.check_token(BlockEntryToken, BlockEndToken): + self.states.append(self.parse_block_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_block_sequence_entry + return self.process_empty_scalar(token.end_mark) + if not self.scanner.check_token(BlockEndToken): + token = self.scanner.peek_token() + raise ParserError( + 'while parsing a block collection', + self.marks[-1], + _F('expected <block end>, but found {token_id!r}', token_id=token.id), + token.start_mark, + ) + token = self.scanner.get_token() # BlockEndToken + event = SequenceEndEvent( + token.start_mark, token.end_mark, comment=token.comment + ) + self.state = self.states.pop() + self.marks.pop() + return event + + # indentless_sequence ::= (BLOCK-ENTRY block_node?)+ + + # indentless_sequence? 
+ # sequence: + # - entry + # - nested + + def parse_indentless_sequence_entry(self): + # type: () -> Any + if self.scanner.check_token(BlockEntryToken): + token = self.scanner.get_token() + self.move_token_comment(token) + if not self.scanner.check_token( + BlockEntryToken, KeyToken, ValueToken, BlockEndToken + ): + self.states.append(self.parse_indentless_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_indentless_sequence_entry + return self.process_empty_scalar(token.end_mark) + token = self.scanner.peek_token() + c = None + if self.loader and self.loader.comment_handling is None: + c = token.comment + start_mark = token.start_mark + else: + start_mark = self.last_event.end_mark # type: ignore + c = self.distribute_comment(token.comment, start_mark.line) # type: ignore + event = SequenceEndEvent(start_mark, start_mark, comment=c) + self.state = self.states.pop() + return event + + # block_mapping ::= BLOCK-MAPPING_START + # ((KEY block_node_or_indentless_sequence?)? 
+ # (VALUE block_node_or_indentless_sequence?)?)* + # BLOCK-END + + def parse_block_mapping_first_key(self): + # type: () -> Any + token = self.scanner.get_token() + self.marks.append(token.start_mark) + return self.parse_block_mapping_key() + + def parse_block_mapping_key(self): + # type: () -> Any + if self.scanner.check_token(KeyToken): + token = self.scanner.get_token() + self.move_token_comment(token) + if not self.scanner.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_value) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_value + return self.process_empty_scalar(token.end_mark) + if self.resolver.processing_version > (1, 1) and self.scanner.check_token( + ValueToken + ): + self.state = self.parse_block_mapping_value + return self.process_empty_scalar(self.scanner.peek_token().start_mark) + if not self.scanner.check_token(BlockEndToken): + token = self.scanner.peek_token() + raise ParserError( + 'while parsing a block mapping', + self.marks[-1], + _F('expected <block end>, but found {token_id!r}', token_id=token.id), + token.start_mark, + ) + token = self.scanner.get_token() + self.move_token_comment(token) + event = MappingEndEvent(token.start_mark, token.end_mark, comment=token.comment) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_block_mapping_value(self): + # type: () -> Any + if self.scanner.check_token(ValueToken): + token = self.scanner.get_token() + # value token might have post comment move it to e.g. 
block + if self.scanner.check_token(ValueToken): + self.move_token_comment(token) + else: + if not self.scanner.check_token(KeyToken): + self.move_token_comment(token, empty=True) + # else: empty value for this key cannot move token.comment + if not self.scanner.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_key) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_key + comment = token.comment + if comment is None: + token = self.scanner.peek_token() + comment = token.comment + if comment: + token._comment = [None, comment[1]] + comment = [comment[0], None] + return self.process_empty_scalar(token.end_mark, comment=comment) + else: + self.state = self.parse_block_mapping_key + token = self.scanner.peek_token() + return self.process_empty_scalar(token.start_mark) + + # flow_sequence ::= FLOW-SEQUENCE-START + # (flow_sequence_entry FLOW-ENTRY)* + # flow_sequence_entry? + # FLOW-SEQUENCE-END + # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + # + # Note that while production rules for both flow_sequence_entry and + # flow_mapping_entry are equal, their interpretations are different. + # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?` + # generate an inline mapping (set syntax). 
+ + def parse_flow_sequence_first_entry(self): + # type: () -> Any + token = self.scanner.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_sequence_entry(first=True) + + def parse_flow_sequence_entry(self, first=False): + # type: (bool) -> Any + if not self.scanner.check_token(FlowSequenceEndToken): + if not first: + if self.scanner.check_token(FlowEntryToken): + self.scanner.get_token() + else: + token = self.scanner.peek_token() + raise ParserError( + 'while parsing a flow sequence', + self.marks[-1], + _F( + "expected ',' or ']', but got {token_id!r}", + token_id=token.id, + ), + token.start_mark, + ) + + if self.scanner.check_token(KeyToken): + token = self.scanner.peek_token() + event = MappingStartEvent( + None, None, True, token.start_mark, token.end_mark, flow_style=True + ) # type: Any + self.state = self.parse_flow_sequence_entry_mapping_key + return event + elif not self.scanner.check_token(FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry) + return self.parse_flow_node() + token = self.scanner.get_token() + event = SequenceEndEvent( + token.start_mark, token.end_mark, comment=token.comment + ) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_sequence_entry_mapping_key(self): + # type: () -> Any + token = self.scanner.get_token() + if not self.scanner.check_token( + ValueToken, FlowEntryToken, FlowSequenceEndToken + ): + self.states.append(self.parse_flow_sequence_entry_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_value + return self.process_empty_scalar(token.end_mark) + + def parse_flow_sequence_entry_mapping_value(self): + # type: () -> Any + if self.scanner.check_token(ValueToken): + token = self.scanner.get_token() + if not self.scanner.check_token(FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_end) + return self.parse_flow_node() + else: + 
            # --- tail of parse_flow_sequence_entry_mapping_value; the method
            # header precedes this chunk and is reproduced unchanged ---
            self.state = self.parse_flow_sequence_entry_mapping_end
            return self.process_empty_scalar(token.end_mark)
        else:
            self.state = self.parse_flow_sequence_entry_mapping_end
            token = self.scanner.peek_token()
            return self.process_empty_scalar(token.start_mark)

    def parse_flow_sequence_entry_mapping_end(self):
        # type: () -> Any
        # Close the implicit single-pair mapping created for a `key: value`
        # entry inside a flow sequence.  The event is zero-width: the next
        # token's start mark is used for both start and end.
        self.state = self.parse_flow_sequence_entry
        token = self.scanner.peek_token()
        return MappingEndEvent(token.start_mark, token.start_mark)

    # flow_mapping  ::= FLOW-MAPPING-START
    #                   (flow_mapping_entry FLOW-ENTRY)*
    #                   flow_mapping_entry?
    #                   FLOW-MAPPING-END
    # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?

    def parse_flow_mapping_first_key(self):
        # type: () -> Any
        # Consume the FLOW-MAPPING-START token and remember its mark so
        # later ParserErrors can point back at the opening '{'.
        token = self.scanner.get_token()
        self.marks.append(token.start_mark)
        return self.parse_flow_mapping_key(first=True)

    def parse_flow_mapping_key(self, first=False):
        # type: (Any) -> Any
        # Parse one key position of a flow mapping.  `first` suppresses the
        # requirement for a preceding ',' on the entry right after '{'.
        if not self.scanner.check_token(FlowMappingEndToken):
            if not first:
                if self.scanner.check_token(FlowEntryToken):
                    self.scanner.get_token()
                else:
                    token = self.scanner.peek_token()
                    raise ParserError(
                        'while parsing a flow mapping',
                        self.marks[-1],
                        _F(
                            "expected ',' or '}}', but got {token_id!r}",
                            token_id=token.id,
                        ),
                        token.start_mark,
                    )
            if self.scanner.check_token(KeyToken):
                token = self.scanner.get_token()
                # A '?' key: parse the key node unless it is empty
                # (immediately followed by ':', ',' or '}').
                if not self.scanner.check_token(
                    ValueToken, FlowEntryToken, FlowMappingEndToken
                ):
                    self.states.append(self.parse_flow_mapping_value)
                    return self.parse_flow_node()
                else:
                    self.state = self.parse_flow_mapping_value
                    return self.process_empty_scalar(token.end_mark)
            elif self.resolver.processing_version > (1, 1) and self.scanner.check_token(
                ValueToken
            ):
                # YAML 1.2: a bare ':' implies an empty key.
                self.state = self.parse_flow_mapping_value
                return self.process_empty_scalar(self.scanner.peek_token().end_mark)
            elif not self.scanner.check_token(FlowMappingEndToken):
                # Plain node used directly as a key (value will be empty).
                self.states.append(self.parse_flow_mapping_empty_value)
                return self.parse_flow_node()
        # FLOW-MAPPING-END reached: emit the end event and pop parser state.
        token = self.scanner.get_token()
        event = MappingEndEvent(token.start_mark, token.end_mark, comment=token.comment)
        self.state = self.states.pop()
        self.marks.pop()
        return event

    def parse_flow_mapping_value(self):
        # type: () -> Any
        # Parse the value half of a flow-mapping entry; an absent value
        # (no ':' or an empty one) yields an empty scalar.
        if self.scanner.check_token(ValueToken):
            token = self.scanner.get_token()
            if not self.scanner.check_token(FlowEntryToken, FlowMappingEndToken):
                self.states.append(self.parse_flow_mapping_key)
                return self.parse_flow_node()
            else:
                self.state = self.parse_flow_mapping_key
                return self.process_empty_scalar(token.end_mark)
        else:
            self.state = self.parse_flow_mapping_key
            token = self.scanner.peek_token()
            return self.process_empty_scalar(token.start_mark)

    def parse_flow_mapping_empty_value(self):
        # type: () -> Any
        # A key with no ':' at all — value is an empty scalar.
        self.state = self.parse_flow_mapping_key
        return self.process_empty_scalar(self.scanner.peek_token().start_mark)

    def process_empty_scalar(self, mark, comment=None):
        # type: (Any, Any) -> Any
        # Produce a plain (implicit) empty-string scalar event at `mark`.
        return ScalarEvent(None, None, (True, False), "", mark, mark, comment=comment)

    def move_token_comment(self, token, nt=None, empty=False):
        # type: (Any, Optional[Any], Optional[bool]) -> Any
        # No-op in the base parser; overridden by the round-trip parsers
        # below, which relocate comments between tokens.
        pass


class RoundTripParser(Parser):
    """roundtrip is a safe loader, that wants to see the unmangled tag"""

    def transform_tag(self, handle, suffix):
        # type: (Any, Any) -> Any
        # return self.tag_handles[handle]+suffix
        # Only resolve '!!' for the core YAML types; any other secondary
        # tag is kept verbatim so it round-trips unmangled.
        if handle == '!!' and suffix in (
            'null',
            'bool',
            'int',
            'float',
            'binary',
            'timestamp',
            'omap',
            'pairs',
            'set',
            'str',
            'seq',
            'map',
        ):
            return Parser.transform_tag(self, handle, suffix)
        return handle + suffix

    def move_token_comment(self, token, nt=None, empty=False):
        # type: (Any, Optional[Any], Optional[bool]) -> Any
        # Old-style comment relocation: shift the comment on `token` onto
        # the next token (or the explicit `nt`).
        token.move_old_comment(
            self.scanner.peek_token() if nt is None else nt, empty=empty
        )


class RoundTripParserSC(RoundTripParser):
    """roundtrip is a safe loader, that wants to see the unmangled tag"""

    # some of the differences are based on the superclass testing
    # if self.loader.comment_handling is not None

    def move_token_comment(self, token, nt=None, empty=False):
        # type: (Any, Optional[Any], Optional[bool]) -> None
        token.move_new_comment(
            self.scanner.peek_token() if nt is None else nt, empty=empty
        )

    def distribute_comment(self, comment, line):
        # type: (Any, Any) -> Any
        # Decide, per the loader's comment_handling policy, how a comment
        # block attached after `line` is split between the preceding and
        # following node.
        # ToDo, look at indentation of the comment to determine attachment
        if comment is None:
            return None
        if not comment[0]:
            return None
        if comment[0][0] != line + 1:
            nprintf('>>>dcxxx', comment, line)
        assert comment[0][0] == line + 1
        # if comment[0] - line > 1:
        #     return
        typ = self.loader.comment_handling & 0b11
        # nprintf('>>>dca', comment, line, typ)
        if typ == C_POST:
            # everything stays attached to the preceding node
            return None
        if typ == C_PRE:
            # everything moves to the following node
            c = [None, None, comment[0]]
            comment[0] = None
            return c
        # C_SPLIT_*: find the first blank line to split on
        # nprintf('>>>dcb', comment[0])
        for _idx, cmntidx in enumerate(comment[0]):
            # nprintf('>>>dcb', cmntidx)
            if isinstance(self.scanner.comments[cmntidx], BlankLineComment):
                break
        else:
            return None  # no space found
        if _idx == 0:
            return None  # first line was blank
        # nprintf('>>>dcc', idx)
        if typ == C_SPLIT_ON_FIRST_BLANK:
            c = [None, None, comment[0][:_idx]]
            comment[0] = comment[0][_idx:]
            return c
        raise NotImplementedError  # reserved

# --- end of lib/ruyaml/parser.py (py.typed / reader.py patch headers follow) ---
# coding: utf-8

# This module contains abstractions for the input stream. You don't have to
# look further, there is no pretty code.
#
# We define two classes here.
#
#   Mark(source, line, column)
# It's just a record and its only use is producing nice error messages.
# Parser does not use it for any other purposes.
#
#   Reader(source, data)
# Reader determines the encoding of `data` and converts it to unicode.
# Reader provides the following methods and attributes:
#   reader.peek(length=1) - return the next `length` characters
#   reader.forward(length=1) - move the current position to `length`
#      characters.
#   reader.index - the number of the current character.
#   reader.line, stream.column - the line and the column of the current
#      character.

import codecs
from typing import Any, Optional, Text, Tuple

from ruyaml.compat import _F  # NOQA
from ruyaml.error import FileMark, StringMark, YAMLError, YAMLStreamError
from ruyaml.util import RegExp

# from ruyaml.compat import StreamTextType # NOQA

__all__ = ['Reader', 'ReaderError']


class ReaderError(YAMLError):
    """Raised when the input stream contains undecodable bytes or
    characters outside the YAML printable range."""

    def __init__(self, name, position, character, encoding, reason):
        # type: (Any, Any, Any, Any, Any) -> None
        self.name = name
        self.character = character  # offending byte (bytes) or code point (int)
        self.position = position
        self.encoding = encoding
        self.reason = reason

    def __str__(self):
        # type: () -> Any
        # bytes => a decode failure; int => a non-printable decoded character
        if isinstance(self.character, bytes):
            return _F(
                "'{self_encoding!s}' codec can't decode byte #x{ord_self_character:02x}: "
                '{self_reason!s}\n'
                '  in "{self_name!s}", position {self_position:d}',
                self_encoding=self.encoding,
                ord_self_character=ord(self.character),
                self_reason=self.reason,
                self_name=self.name,
                self_position=self.position,
            )
        else:
            return _F(
                'unacceptable character #x{self_character:04x}: {self_reason!s}\n'
                '  in "{self_name!s}", position {self_position:d}',
                self_character=self.character,
                self_reason=self.reason,
                self_name=self.name,
                self_position=self.position,
            )


class Reader:
    # Reader:
    # - determines the data encoding and converts it to a unicode string,
    # - checks if characters are in allowed range,
    # - adds '\0' to the end.

    # Reader accepts
    #  - a `bytes` object,
    #  - a `str` object,
    #  - a file-like object with its `read` method returning `str`,
    #  - a file-like object with its `read` method returning `unicode`.

    # Yeah, it's ugly and slow.

    def __init__(self, stream, loader=None):
        # type: (Any, Any) -> None
        self.loader = loader
        # register ourselves on the loader unless it already has a reader
        if self.loader is not None and getattr(self.loader, '_reader', None) is None:
            self.loader._reader = self
        self.reset_reader()
        self.stream = stream  # type: Any  # as .read is called

    def reset_reader(self):
        # type: () -> None
        # (Re)initialize all buffering/position state.
        self.name = None  # type: Any
        self.stream_pointer = 0  # bytes consumed from the raw stream
        self.eof = True
        self.buffer = ""  # decoded characters, '\0'-terminated at EOF
        self.pointer = 0  # index of the current character in `buffer`
        self.raw_buffer = None  # type: Any  # undecoded bytes (or None)
        self.raw_decode = None
        self.encoding = None  # type: Optional[Text]
        self.index = 0  # absolute character index
        self.line = 0
        self.column = 0

    @property
    def stream(self):
        # type: () -> Any
        try:
            return self._stream
        except AttributeError:
            # BUG FIX: error message previously read
            # 'input stream needs to specified' (missing "be")
            raise YAMLStreamError('input stream needs to be specified')

    @stream.setter
    def stream(self, val):
        # type: (Any) -> None
        # Accepts str (used directly), bytes (decoded), or a file-like
        # object with a read() method (read lazily in update_raw()).
        if val is None:
            return
        self._stream = None
        if isinstance(val, str):
            self.name = ''
            self.check_printable(val)
            self.buffer = val + '\0'
        elif isinstance(val, bytes):
            self.name = ''
            self.raw_buffer = val
            self.determine_encoding()
        else:
            if not hasattr(val, 'read'):
                raise YAMLStreamError('stream argument needs to have a read() method')
            self._stream = val
            self.name = getattr(self.stream, 'name', '')
            self.eof = False
            self.raw_buffer = None
            self.determine_encoding()

    def peek(self, index=0):
        # type: (int) -> Text
        """Return the character `index` positions ahead without consuming it."""
        try:
            return self.buffer[self.pointer + index]
        except IndexError:
            self.update(index + 1)
            return self.buffer[self.pointer + index]

    def prefix(self, length=1):
        # type: (int) -> Any
        """Return (without consuming) the next `length` characters."""
        if self.pointer + length >= len(self.buffer):
            self.update(length)
        return self.buffer[self.pointer : self.pointer + length]

    def forward_1_1(self, length=1):
        # type: (int) -> None
        """Advance `length` characters, YAML 1.1 line-break rules
        (NEL/LS/PS also count as line breaks)."""
        if self.pointer + length + 1 >= len(self.buffer):
            self.update(length + 1)
        while length != 0:
            ch = self.buffer[self.pointer]
            self.pointer += 1
            self.index += 1
            if ch in '\n\x85\u2028\u2029' or (
                ch == '\r' and self.buffer[self.pointer] != '\n'
            ):
                self.line += 1
                self.column = 0
            elif ch != '\uFEFF':  # BOM does not advance the column
                self.column += 1
            length -= 1

    def forward(self, length=1):
        # type: (int) -> None
        """Advance `length` characters, YAML 1.2 line-break rules
        (only LF and lone CR are line breaks)."""
        if self.pointer + length + 1 >= len(self.buffer):
            self.update(length + 1)
        while length != 0:
            ch = self.buffer[self.pointer]
            self.pointer += 1
            self.index += 1
            if ch == '\n' or (ch == '\r' and self.buffer[self.pointer] != '\n'):
                self.line += 1
                self.column = 0
            elif ch != '\uFEFF':
                self.column += 1
            length -= 1

    def get_mark(self):
        # type: () -> Any
        # StringMark keeps a snippet of the buffer for error display;
        # FileMark only records the position.
        if self.stream is None:
            return StringMark(
                self.name, self.index, self.line, self.column, self.buffer, self.pointer
            )
        else:
            return FileMark(self.name, self.index, self.line, self.column)

    def determine_encoding(self):
        # type: () -> None
        # Inspect the first bytes for a UTF-16 BOM; default to UTF-8.
        while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
            self.update_raw()
        if isinstance(self.raw_buffer, bytes):
            if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
                self.raw_decode = codecs.utf_16_le_decode  # type: ignore
                self.encoding = 'utf-16-le'
            elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
                self.raw_decode = codecs.utf_16_be_decode  # type: ignore
                self.encoding = 'utf-16-be'
            else:
                self.raw_decode = codecs.utf_8_decode  # type: ignore
                self.encoding = 'utf-8'
        self.update(1)
'[^\x09\x0A\x0D\x20-\x7E\x85' + '\xA0-\uD7FF' + '\uE000-\uFFFD' + '\U00010000-\U0010FFFF' + ']' + ) + + _printable_ascii = ('\x09\x0A\x0D' + "".join(map(chr, range(0x20, 0x7F)))).encode( + 'ascii' + ) + + @classmethod + def _get_non_printable_ascii(cls, data): # type: ignore + # type: (Text, bytes) -> Optional[Tuple[int, Text]] + ascii_bytes = data.encode('ascii') # type: ignore + non_printables = ascii_bytes.translate(None, cls._printable_ascii) # type: ignore + if not non_printables: + return None + non_printable = non_printables[:1] + return ascii_bytes.index(non_printable), non_printable.decode('ascii') + + @classmethod + def _get_non_printable_regex(cls, data): + # type: (Text) -> Optional[Tuple[int, Text]] + match = cls.NON_PRINTABLE.search(data) + if not bool(match): + return None + return match.start(), match.group() + + @classmethod + def _get_non_printable(cls, data): + # type: (Text) -> Optional[Tuple[int, Text]] + try: + return cls._get_non_printable_ascii(data) # type: ignore + except UnicodeEncodeError: + return cls._get_non_printable_regex(data) + + def check_printable(self, data): + # type: (Any) -> None + non_printable_match = self._get_non_printable(data) + if non_printable_match is not None: + start, character = non_printable_match + position = self.index + (len(self.buffer) - self.pointer) + start + raise ReaderError( + self.name, + position, + ord(character), + 'unicode', + 'special characters are not allowed', + ) + + def update(self, length): + # type: (int) -> None + if self.raw_buffer is None: + return + self.buffer = self.buffer[self.pointer :] + self.pointer = 0 + while len(self.buffer) < length: + if not self.eof: + self.update_raw() + if self.raw_decode is not None: + try: + data, converted = self.raw_decode( + self.raw_buffer, 'strict', self.eof + ) + except UnicodeDecodeError as exc: + character = self.raw_buffer[exc.start] + if self.stream is not None: + position = ( + self.stream_pointer - len(self.raw_buffer) + exc.start + ) + elif 
self.stream is not None: + position = ( + self.stream_pointer - len(self.raw_buffer) + exc.start + ) + else: + position = exc.start + raise ReaderError( + self.name, position, character, exc.encoding, exc.reason + ) + else: + data = self.raw_buffer + converted = len(data) + self.check_printable(data) + self.buffer += data + self.raw_buffer = self.raw_buffer[converted:] + if self.eof: + self.buffer += '\0' + self.raw_buffer = None + break + + def update_raw(self, size=None): + # type: (Optional[int]) -> None + if size is None: + size = 4096 + data = self.stream.read(size) + if self.raw_buffer is None: + self.raw_buffer = data + else: + self.raw_buffer += data + self.stream_pointer += len(data) + if not data: + self.eof = True + + +# try: +# import psyco +# psyco.bind(Reader) +# except ImportError: +# pass diff --git a/lib/ruyaml/representer.py b/lib/ruyaml/representer.py new file mode 100644 index 0000000..17d6356 --- /dev/null +++ b/lib/ruyaml/representer.py @@ -0,0 +1,1197 @@ +# coding: utf-8 + +import base64 +import copyreg +import datetime +import types +from collections import OrderedDict + +from ruyaml.anchor import Anchor +from ruyaml.comments import ( + CommentedKeyMap, + CommentedKeySeq, + CommentedMap, + CommentedOrderedMap, + CommentedSeq, + CommentedSet, + TaggedScalar, + comment_attrib, + merge_attrib, +) +from ruyaml.compat import ordereddict # NOQA; type: ignore +from ruyaml.compat import _F +from ruyaml.error import * # NOQA +from ruyaml.nodes import * # NOQA +from ruyaml.scalarbool import ScalarBoolean +from ruyaml.scalarfloat import ScalarFloat +from ruyaml.scalarint import BinaryInt, HexCapsInt, HexInt, OctalInt, ScalarInt +from ruyaml.scalarstring import ( + DoubleQuotedScalarString, + FoldedScalarString, + LiteralScalarString, + PlainScalarString, + SingleQuotedScalarString, +) +from ruyaml.timestamp import TimeStamp + +if False: # MYPY + from typing import Any, Dict, List, Optional, Text, Union # NOQA + +# fmt: off +__all__ = 
# fmt: off
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
           'RepresenterError', 'RoundTripRepresenter']
# fmt: on


class RepresenterError(YAMLError):
    pass


class BaseRepresenter:
    # Maps Python types to representer callables; class-level so
    # add_representer() can register per-subclass (copy-on-write below).
    yaml_representers = {}  # type: Dict[Any, Any]
    yaml_multi_representers = {}  # type: Dict[Any, Any]

    def __init__(self, default_style=None, default_flow_style=None, dumper=None):
        # type: (Any, Any, Any) -> None
        self.dumper = dumper
        if self.dumper is not None:
            self.dumper._representer = self
        self.default_style = default_style
        self.default_flow_style = default_flow_style
        # id(obj) -> node, used for anchors/aliases within one document
        self.represented_objects = {}  # type: Dict[Any, Any]
        # keeps objects alive so id() values stay unique during represent()
        self.object_keeper = []  # type: List[Any]
        self.alias_key = None  # type: Optional[int]
        self.sort_base_mapping_type_on_output = True

    @property
    def serializer(self):
        # type: () -> Any
        try:
            if hasattr(self.dumper, 'typ'):
                return self.dumper.serializer  # type: ignore
            return self.dumper._serializer  # type: ignore
        except AttributeError:
            return self  # cyaml

    def represent(self, data):
        # type: (Any) -> None
        # Represent one document and reset per-document alias state.
        node = self.represent_data(data)
        self.serializer.serialize(node)
        self.represented_objects = {}
        self.object_keeper = []
        self.alias_key = None

    def represent_data(self, data):
        # type: (Any) -> Any
        # Dispatch `data` to a representer: exact-type first, then MRO
        # walk over multi-representers, then the None (catch-all) entries.
        if self.ignore_aliases(data):
            self.alias_key = None
        else:
            self.alias_key = id(data)
        if self.alias_key is not None:
            if self.alias_key in self.represented_objects:
                node = self.represented_objects[self.alias_key]
                # if node is None:
                #     raise RepresenterError(
                #          f"recursive objects are not allowed: {data!r}")
                return node
            # self.represented_objects[alias_key] = None
            self.object_keeper.append(data)
        data_types = type(data).__mro__
        if data_types[0] in self.yaml_representers:
            node = self.yaml_representers[data_types[0]](self, data)
        else:
            for data_type in data_types:
                if data_type in self.yaml_multi_representers:
                    node = self.yaml_multi_representers[data_type](self, data)
                    break
            else:
                if None in self.yaml_multi_representers:
                    node = self.yaml_multi_representers[None](self, data)
                elif None in self.yaml_representers:
                    node = self.yaml_representers[None](self, data)
                else:
                    # last resort: stringify
                    node = ScalarNode(None, str(data))
        # if alias_key is not None:
        #     self.represented_objects[alias_key] = node
        return node

    def represent_key(self, data):
        # type: (Any) -> Any
        """
        David Fraser: Extract a method to represent keys in mappings, so that
        a subclass can choose not to quote them (for example)
        used in represent_mapping
        https://bitbucket.org/davidfraser/pyyaml/commits/d81df6eb95f20cac4a79eed95ae553b5c6f77b8c
        """
        return self.represent_data(data)

    @classmethod
    def add_representer(cls, data_type, representer):
        # type: (Any, Any) -> None
        # Copy-on-write: give this class its own dict before mutating so
        # registrations don't leak into sibling/parent classes.
        if 'yaml_representers' not in cls.__dict__:
            cls.yaml_representers = cls.yaml_representers.copy()
        cls.yaml_representers[data_type] = representer

    @classmethod
    def add_multi_representer(cls, data_type, representer):
        # type: (Any, Any) -> None
        if 'yaml_multi_representers' not in cls.__dict__:
            cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
        cls.yaml_multi_representers[data_type] = representer

    def represent_scalar(self, tag, value, style=None, anchor=None):
        # type: (Any, Any, Any, Any) -> Any
        if style is None:
            style = self.default_style
        comment = None
        # block scalars carry their comment on the value object
        if style and style[0] in '|>':
            comment = getattr(value, 'comment', None)
            if comment:
                comment = [None, [comment]]
        node = ScalarNode(tag, value, style=style, comment=comment, anchor=anchor)
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        return node

    def represent_sequence(self, tag, sequence, flow_style=None):
        # type: (Any, Any, Any) -> Any
        value = []  # type: List[Any]
        node = SequenceNode(tag, value, flow_style=flow_style)
        # register before recursing so self-references become aliases
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        best_style = True
        for item in sequence:
            node_item = self.represent_data(item)
            # any styled/non-scalar item forces block style
            if not (isinstance(node_item, ScalarNode) and not node_item.style):
                best_style = False
            value.append(node_item)
        if flow_style is None:
            if self.default_flow_style is not None:
                node.flow_style = self.default_flow_style
            else:
                node.flow_style = best_style
        return node

    def represent_omap(self, tag, omap, flow_style=None):
        # type: (Any, Any, Any) -> Any
        # An omap is a sequence of single-key mappings.
        value = []  # type: List[Any]
        node = SequenceNode(tag, value, flow_style=flow_style)
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        best_style = True
        for item_key in omap:
            item_val = omap[item_key]
            node_item = self.represent_data({item_key: item_val})
            # if not (isinstance(node_item, ScalarNode) \
            #    and not node_item.style):
            #     best_style = False
            value.append(node_item)
        if flow_style is None:
            if self.default_flow_style is not None:
                node.flow_style = self.default_flow_style
            else:
                node.flow_style = best_style
        return node

    def represent_mapping(self, tag, mapping, flow_style=None):
        # type: (Any, Any, Any) -> Any
        value = []  # type: List[Any]
        node = MappingNode(tag, value, flow_style=flow_style)
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        best_style = True
        if hasattr(mapping, 'items'):
            mapping = list(mapping.items())
            if self.sort_base_mapping_type_on_output:
                try:
                    mapping = sorted(mapping)
                except TypeError:
                    # unorderable keys: keep insertion order
                    pass
        for item_key, item_value in mapping:
            node_key = self.represent_key(item_key)
            node_value = self.represent_data(item_value)
            if not (isinstance(node_key, ScalarNode) and not node_key.style):
                best_style = False
            if not (isinstance(node_value, ScalarNode) and not node_value.style):
                best_style = False
            value.append((node_key, node_value))
        if flow_style is None:
            if self.default_flow_style is not None:
                node.flow_style = self.default_flow_style
            else:
                node.flow_style = best_style
        return node
    def ignore_aliases(self, data):
        # type: (Any) -> bool
        # Base class never anchors/aliases anything.
        return False


class SafeRepresenter(BaseRepresenter):
    def ignore_aliases(self, data):
        # type: (Any) -> bool
        # https://docs.python.org/3/reference/expressions.html#parenthesized-forms :
        # "i.e. two occurrences of the empty tuple may or may not yield the same object"
        # so "data is ()" should not be used
        if data is None or (isinstance(data, tuple) and data == ()):
            return True
        if isinstance(data, (bytes, str, bool, int, float)):
            return True
        return False

    def represent_none(self, data):
        # type: (Any) -> Any
        return self.represent_scalar('tag:yaml.org,2002:null', 'null')

    def represent_str(self, data):
        # type: (Any) -> Any
        return self.represent_scalar('tag:yaml.org,2002:str', data)

    def represent_binary(self, data):
        # type: (Any) -> Any
        # bytes are emitted as base64 in a literal block scalar
        data = base64.encodebytes(data).decode('ascii')
        return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')

    def represent_bool(self, data, anchor=None):
        # type: (Any, Optional[Any]) -> Any
        try:
            # dumper may override the true/false spellings
            value = self.dumper.boolean_representation[bool(data)]  # type: ignore
        except AttributeError:
            if data:
                value = 'true'
            else:
                value = 'false'
        return self.represent_scalar('tag:yaml.org,2002:bool', value, anchor=anchor)

    def represent_int(self, data):
        # type: (Any) -> Any
        return self.represent_scalar('tag:yaml.org,2002:int', str(data))

    # compute float('inf') portably by squaring until repr stabilizes
    inf_value = 1e300
    while repr(inf_value) != repr(inf_value * inf_value):
        inf_value *= inf_value

    def represent_float(self, data):
        # type: (Any) -> Any
        # `data != data` catches NaN; the second clause is always False for
        # ordinary floats (kept byte-identical from upstream).
        if data != data or (data == 0.0 and data == 1.0):
            value = '.nan'
        elif data == self.inf_value:
            value = '.inf'
        elif data == -self.inf_value:
            value = '-.inf'
        else:
            value = repr(data).lower()
            if getattr(self.serializer, 'use_version', None) == (1, 1):
                if '.' not in value and 'e' in value:
                    # Note that in some cases `repr(data)` represents a float number
                    # without the decimal parts.  For instance:
                    #   >>> repr(1e17)
                    #   '1e17'
                    # Unfortunately, this is not a valid float representation according
                    # to the definition of the `!!float` tag in YAML 1.1.  We fix
                    # this by adding '.0' before the 'e' symbol.
                    value = value.replace('e', '.0e', 1)
        return self.represent_scalar('tag:yaml.org,2002:float', value)

    def represent_list(self, data):
        # type: (Any) -> Any
        # pairs = (len(data) > 0 and isinstance(data, list))
        # if pairs:
        #     for item in data:
        #         if not isinstance(item, tuple) or len(item) != 2:
        #             pairs = False
        #             break
        # if not pairs:
        return self.represent_sequence('tag:yaml.org,2002:seq', data)

    # value = []
    # for item_key, item_value in data:
    #     value.append(self.represent_mapping('tag:yaml.org,2002:map',
    #         [(item_key, item_value)]))
    # return SequenceNode('tag:yaml.org,2002:pairs', value)

    def represent_dict(self, data):
        # type: (Any) -> Any
        return self.represent_mapping('tag:yaml.org,2002:map', data)

    def represent_ordereddict(self, data):
        # type: (Any) -> Any
        return self.represent_omap('tag:yaml.org,2002:omap', data)

    def represent_set(self, data):
        # type: (Any) -> Any
        # a YAML set is a mapping with all-null values
        value = {}  # type: Dict[Any, None]
        for key in data:
            value[key] = None
        return self.represent_mapping('tag:yaml.org,2002:set', value)

    def represent_date(self, data):
        # type: (Any) -> Any
        value = data.isoformat()
        return self.represent_scalar('tag:yaml.org,2002:timestamp', value)

    def represent_datetime(self, data):
        # type: (Any) -> Any
        value = data.isoformat(' ')
        return self.represent_scalar('tag:yaml.org,2002:timestamp', value)

    def represent_yaml_object(self, tag, data, cls, flow_style=None):
        # type: (Any, Any, Any, Any) -> Any
        if hasattr(data, '__getstate__'):
            state = data.__getstate__()
        else:
            state = data.__dict__.copy()
        return self.represent_mapping(tag, state, flow_style=flow_style)

    def represent_undefined(self, data):
        # type: (Any) -> None
        raise RepresenterError(_F('cannot represent an object: {data!s}', data=data))


SafeRepresenter.add_representer(type(None), SafeRepresenter.represent_none)

SafeRepresenter.add_representer(str, SafeRepresenter.represent_str)

SafeRepresenter.add_representer(bytes, SafeRepresenter.represent_binary)

SafeRepresenter.add_representer(bool, SafeRepresenter.represent_bool)

SafeRepresenter.add_representer(int, SafeRepresenter.represent_int)

SafeRepresenter.add_representer(float, SafeRepresenter.represent_float)

SafeRepresenter.add_representer(list, SafeRepresenter.represent_list)

SafeRepresenter.add_representer(tuple, SafeRepresenter.represent_list)

SafeRepresenter.add_representer(dict, SafeRepresenter.represent_dict)

SafeRepresenter.add_representer(set, SafeRepresenter.represent_set)

SafeRepresenter.add_representer(ordereddict, SafeRepresenter.represent_ordereddict)

SafeRepresenter.add_representer(OrderedDict, SafeRepresenter.represent_ordereddict)

SafeRepresenter.add_representer(datetime.date, SafeRepresenter.represent_date)

SafeRepresenter.add_representer(datetime.datetime, SafeRepresenter.represent_datetime)

SafeRepresenter.add_representer(None, SafeRepresenter.represent_undefined)


class Representer(SafeRepresenter):
    def represent_complex(self, data):
        # type: (Any) -> Any
        # Emit the shortest unambiguous complex literal.
        if data.imag == 0.0:
            data = repr(data.real)
        elif data.real == 0.0:
            data = _F('{data_imag!r}j', data_imag=data.imag)
        elif data.imag > 0:
            data = _F(
                '{data_real!r}+{data_imag!r}j', data_real=data.real, data_imag=data.imag
            )
        else:
            data = _F(
                '{data_real!r}{data_imag!r}j', data_real=data.real, data_imag=data.imag
            )
        return self.represent_scalar('tag:yaml.org,2002:python/complex', data)

    def represent_tuple(self, data):
        # type: (Any) -> Any
        return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
    def represent_name(self, data):
        # type: (Any) -> Any
        # Tag a class/function by its dotted path; __qualname__ preferred.
        try:
            name = _F(
                '{modname!s}.{qualname!s}',
                modname=data.__module__,
                qualname=data.__qualname__,
            )
        except AttributeError:
            # ToDo: check if this can be reached in Py3
            name = _F(
                '{modname!s}.{name!s}', modname=data.__module__, name=data.__name__
            )
        return self.represent_scalar('tag:yaml.org,2002:python/name:' + name, "")

    def represent_module(self, data):
        # type: (Any) -> Any
        return self.represent_scalar(
            'tag:yaml.org,2002:python/module:' + data.__name__, ""
        )

    def represent_object(self, data):
        # type: (Any) -> Any
        # We use __reduce__ API to save the data. data.__reduce__ returns
        # a tuple of length 2-5:
        #   (function, args, state, listitems, dictitems)

        # For reconstructing, we calls function(*args), then set its state,
        # listitems, and dictitems if they are not None.

        # A special case is when function.__name__ == '__newobj__'. In this
        # case we create the object with args[0].__new__(*args).

        # Another special case is when __reduce__ returns a string - we don't
        # support it.

        # We produce a !!python/object, !!python/object/new or
        # !!python/object/apply node.

        cls = type(data)
        if cls in copyreg.dispatch_table:  # type: ignore
            reduce = copyreg.dispatch_table[cls](data)  # type: ignore
        elif hasattr(data, '__reduce_ex__'):
            reduce = data.__reduce_ex__(2)
        elif hasattr(data, '__reduce__'):
            reduce = data.__reduce__()
        else:
            raise RepresenterError(_F('cannot represent object: {data!r}', data=data))
        # pad the reduce tuple to exactly 5 elements
        reduce = (list(reduce) + [None] * 5)[:5]
        function, args, state, listitems, dictitems = reduce
        args = list(args)
        if state is None:
            state = {}
        if listitems is not None:
            listitems = list(listitems)
        if dictitems is not None:
            dictitems = dict(dictitems)
        if function.__name__ == '__newobj__':
            function = args[0]
            args = args[1:]
            tag = 'tag:yaml.org,2002:python/object/new:'
            newobj = True
        else:
            tag = 'tag:yaml.org,2002:python/object/apply:'
            newobj = False
        try:
            function_name = _F(
                '{fun!s}.{qualname!s}',
                fun=function.__module__,
                qualname=function.__qualname__,
            )
        except AttributeError:
            # ToDo: check if this can be reached in Py3
            function_name = _F(
                '{fun!s}.{name!s}', fun=function.__module__, name=function.__name__
            )
        # simplest form: plain !!python/object with only a state dict
        if (
            not args
            and not listitems
            and not dictitems
            and isinstance(state, dict)
            and newobj
        ):
            return self.represent_mapping(
                'tag:yaml.org,2002:python/object:' + function_name, state
            )
        # args-only form: a sequence node suffices
        if not listitems and not dictitems and isinstance(state, dict) and not state:
            return self.represent_sequence(tag + function_name, args)
        # general form: explicit mapping of the reduce components
        value = {}
        if args:
            value['args'] = args
        if state or not isinstance(state, dict):
            value['state'] = state
        if listitems:
            value['listitems'] = listitems
        if dictitems:
            value['dictitems'] = dictitems
        return self.represent_mapping(tag + function_name, value)


Representer.add_representer(complex, Representer.represent_complex)

Representer.add_representer(tuple, Representer.represent_tuple)

Representer.add_representer(type, Representer.represent_name)
Representer.add_representer(types.FunctionType, Representer.represent_name)

Representer.add_representer(types.BuiltinFunctionType, Representer.represent_name)

Representer.add_representer(types.ModuleType, Representer.represent_module)

Representer.add_multi_representer(object, Representer.represent_object)

Representer.add_multi_representer(type, Representer.represent_name)


class RoundTripRepresenter(SafeRepresenter):
    # need to add type here and write out the .comment
    # in serializer and emitter

    def __init__(self, default_style=None, default_flow_style=None, dumper=None):
        # type: (Any, Any, Any) -> None
        # round-trip output defaults to block style
        if not hasattr(dumper, 'typ') and default_flow_style is None:
            default_flow_style = False
        SafeRepresenter.__init__(
            self,
            default_style=default_style,
            default_flow_style=default_flow_style,
            dumper=dumper,
        )

    def ignore_aliases(self, data):
        # type: (Any) -> bool
        # an explicit anchor on the object forces alias emission
        try:
            if data.anchor is not None and data.anchor.value is not None:
                return False
        except AttributeError:
            pass
        return SafeRepresenter.ignore_aliases(self, data)

    def represent_none(self, data):
        # type: (Any) -> Any
        if (
            len(self.represented_objects) == 0
            and not self.serializer.use_explicit_start
        ):
            # this will be open ended (although it is not yet)
            return self.represent_scalar('tag:yaml.org,2002:null', 'null')
        return self.represent_scalar('tag:yaml.org,2002:null', "")

    def represent_literal_scalarstring(self, data):
        # type: (Any) -> Any
        tag = None
        style = '|'
        anchor = data.yaml_anchor(any=True)
        tag = 'tag:yaml.org,2002:str'
        return self.represent_scalar(tag, data, style=style, anchor=anchor)

    represent_preserved_scalarstring = represent_literal_scalarstring

    def represent_folded_scalarstring(self, data):
        # type: (Any) -> Any
        tag = None
        style = '>'
        anchor = data.yaml_anchor(any=True)
        # re-insert fold markers ('\a') at recorded fold positions so the
        # emitter folds at the same spots
        for fold_pos in reversed(getattr(data, 'fold_pos', [])):
            if (
                data[fold_pos] == ' '
                and (fold_pos > 0 and not data[fold_pos - 1].isspace())
                # NOTE(review): `fold_pos < len(data)` still allows
                # fold_pos == len(data) - 1, making data[fold_pos + 1] an
                # IndexError — presumably fold_pos never points at the last
                # character; confirm against where fold_pos is recorded.
                and (fold_pos < len(data) and not data[fold_pos + 1].isspace())
            ):
                data = data[:fold_pos] + '\a' + data[fold_pos:]
        tag = 'tag:yaml.org,2002:str'
        return self.represent_scalar(tag, data, style=style, anchor=anchor)

    def represent_single_quoted_scalarstring(self, data):
        # type: (Any) -> Any
        tag = None
        style = "'"
        anchor = data.yaml_anchor(any=True)
        tag = 'tag:yaml.org,2002:str'
        return self.represent_scalar(tag, data, style=style, anchor=anchor)

    def represent_double_quoted_scalarstring(self, data):
        # type: (Any) -> Any
        tag = None
        style = '"'
        anchor = data.yaml_anchor(any=True)
        tag = 'tag:yaml.org,2002:str'
        return self.represent_scalar(tag, data, style=style, anchor=anchor)

    def represent_plain_scalarstring(self, data):
        # type: (Any) -> Any
        tag = None
        style = ''
        anchor = data.yaml_anchor(any=True)
        tag = 'tag:yaml.org,2002:str'
        return self.represent_scalar(tag, data, style=style, anchor=anchor)

    def insert_underscore(self, prefix, s, underscore, anchor=None):
        # type: (Any, Any, Any, Any) -> Any
        # Re-insert the '_' grouping recorded at load time:
        # underscore = [interval, leading?, trailing?]
        if underscore is None:
            return self.represent_scalar(
                'tag:yaml.org,2002:int', prefix + s, anchor=anchor
            )
        if underscore[0]:
            sl = list(s)
            pos = len(s) - underscore[0]
            while pos > 0:
                sl.insert(pos, '_')
                pos -= underscore[0]
            s = "".join(sl)
        if underscore[1]:
            s = '_' + s
        if underscore[2]:
            s += '_'
        return self.represent_scalar('tag:yaml.org,2002:int', prefix + s, anchor=anchor)

    def represent_scalar_int(self, data):
        # type: (Any) -> Any
        # _width preserves zero-padding from the original document
        if data._width is not None:
            s = '{:0{}d}'.format(data, data._width)
        else:
            s = format(data, 'd')
        anchor = data.yaml_anchor(any=True)
        return self.insert_underscore("", s, data._underscore, anchor=anchor)

    def represent_binary_int(self, data):
        # type: (Any) -> Any
        if data._width is not None:
            # cannot use '{:#0{}b}', that strips the zeros
            s = '{:0{}b}'.format(data, data._width)
        else:
            s = format(data, 'b')
        anchor = data.yaml_anchor(any=True)
        return self.insert_underscore('0b', s, data._underscore, anchor=anchor)

    def represent_octal_int(self, data):
        # type: (Any) -> Any
        if data._width is not None:
            # cannot use '{:#0{}o}', that strips the zeros
            s = '{:0{}o}'.format(data, data._width)
        else:
            s = format(data, 'o')
        anchor = data.yaml_anchor(any=True)
        return self.insert_underscore('0o', s, data._underscore, anchor=anchor)

    def represent_hex_int(self, data):
        # type: (Any) -> Any
        if data._width is not None:
            # cannot use '{:#0{}x}', that strips the zeros
            s = '{:0{}x}'.format(data, data._width)
        else:
            s = format(data, 'x')
        anchor = data.yaml_anchor(any=True)
        return self.insert_underscore('0x', s, data._underscore, anchor=anchor)

    def represent_hex_caps_int(self, data):
        # type: (Any) -> Any
        if data._width is not None:
            # cannot use '{:#0{}X}', that strips the zeros
            s = '{:0{}X}'.format(data, data._width)
        else:
            s = format(data, 'X')
        anchor = data.yaml_anchor(any=True)
        # NOTE(review): prefix stays lowercase '0x' even for caps digits —
        # presumably intentional (matches loader); confirm round-trip of '0X'.
        return self.insert_underscore('0x', s, data._underscore, anchor=anchor)
len(value) < data._width: + value += '0' + else: + # exponent + m, es = '{:{}.{}e}'.format( + # data, data._width, data._width - data._prec + (1 if data._m_sign else 0) + data, + data._width, + data._width + (1 if data._m_sign else 0), + ).split('e') + w = data._width if data._prec > 0 else (data._width + 1) + if data < 0: + w += 1 + m = m[:w] + e = int(es) + m1, m2 = m.split('.') # always second? + while len(m1) + len(m2) < data._width - (1 if data._prec >= 0 else 0): + m2 += '0' + if data._m_sign and data > 0: + m1 = '+' + m1 + esgn = '+' if data._e_sign else "" + if data._prec < 0: # mantissa without dot + if m2 != '0': + e -= len(m2) + else: + m2 = "" + while (len(m1) + len(m2) - (1 if data._m_sign else 0)) < data._width: + m2 += '0' + e -= 1 + value = m1 + m2 + data._exp + '{:{}0{}d}'.format(e, esgn, data._e_width) + elif data._prec == 0: # mantissa with trailing dot + e -= len(m2) + value = ( + m1 + + m2 + + '.' + + data._exp + + '{:{}0{}d}'.format(e, esgn, data._e_width) + ) + else: + if data._m_lead0 > 0: + m2 = '0' * (data._m_lead0 - 1) + m1 + m2 + m1 = '0' + m2 = m2[: -data._m_lead0] # these should be zeros + e += data._m_lead0 + while len(m1) < data._prec: + m1 += m2[0] + m2 = m2[1:] + e -= 1 + value = ( + m1 + + '.' + + m2 + + data._exp + + '{:{}0{}d}'.format(e, esgn, data._e_width) + ) + + if value is None: + value = repr(data).lower() + return self.represent_scalar('tag:yaml.org,2002:float', value, anchor=anchor) + + def represent_sequence(self, tag, sequence, flow_style=None): + # type: (Any, Any, Any) -> Any + value = [] # type: List[Any] + # if the flow_style is None, the flow style tacked on to the object + # explicitly will be taken. 
If that is None as well the default flow + # style rules + try: + flow_style = sequence.fa.flow_style(flow_style) + except AttributeError: + flow_style = flow_style + try: + anchor = sequence.yaml_anchor() + except AttributeError: + anchor = None + node = SequenceNode(tag, value, flow_style=flow_style, anchor=anchor) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + try: + comment = getattr(sequence, comment_attrib) + node.comment = comment.comment + # reset any comment already printed information + if node.comment and node.comment[1]: + for ct in node.comment[1]: + ct.reset() + item_comments = comment.items + for v in item_comments.values(): + if v and v[1]: + for ct in v[1]: + ct.reset() + item_comments = comment.items + if node.comment is None: + node.comment = comment.comment + else: + # as we are potentially going to extend this, make a new list + node.comment = comment.comment[:] + try: + node.comment.append(comment.end) + except AttributeError: + pass + except AttributeError: + item_comments = {} + for idx, item in enumerate(sequence): + node_item = self.represent_data(item) + self.merge_comments(node_item, item_comments.get(idx)) + if not (isinstance(node_item, ScalarNode) and not node_item.style): + best_style = False + value.append(node_item) + if flow_style is None: + if len(sequence) != 0 and self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def merge_comments(self, node, comments): + # type: (Any, Any) -> Any + if comments is None: + assert hasattr(node, 'comment') + return node + if getattr(node, 'comment', None) is not None: + for idx, val in enumerate(comments): + if idx >= len(node.comment): + continue + nc = node.comment[idx] + if nc is not None: + assert val is None or val == nc + comments[idx] = nc + node.comment = comments + return node + + def represent_key(self, data): + # type: (Any) -> Any + if 
isinstance(data, CommentedKeySeq): + self.alias_key = None + return self.represent_sequence( + 'tag:yaml.org,2002:seq', data, flow_style=True + ) + if isinstance(data, CommentedKeyMap): + self.alias_key = None + return self.represent_mapping( + 'tag:yaml.org,2002:map', data, flow_style=True + ) + return SafeRepresenter.represent_key(self, data) + + def represent_mapping(self, tag, mapping, flow_style=None): + # type: (Any, Any, Any) -> Any + value = [] # type: List[Any] + try: + flow_style = mapping.fa.flow_style(flow_style) + except AttributeError: + flow_style = flow_style + try: + anchor = mapping.yaml_anchor() + except AttributeError: + anchor = None + node = MappingNode(tag, value, flow_style=flow_style, anchor=anchor) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + # no sorting! !! + try: + comment = getattr(mapping, comment_attrib) + if node.comment is None: + node.comment = comment.comment + else: + # as we are potentially going to extend this, make a new list + node.comment = comment.comment[:] + if node.comment and node.comment[1]: + for ct in node.comment[1]: + ct.reset() + item_comments = comment.items + if self.dumper.comment_handling is None: # type: ignore + for v in item_comments.values(): + if v and v[1]: + for ct in v[1]: + ct.reset() + try: + node.comment.append(comment.end) + except AttributeError: + pass + else: + # NEWCMNT + pass + except AttributeError: + item_comments = {} + merge_list = [m[1] for m in getattr(mapping, merge_attrib, [])] + try: + merge_pos = getattr(mapping, merge_attrib, [[0]])[0][0] + except IndexError: + merge_pos = 0 + item_count = 0 + if bool(merge_list): + items = mapping.non_merged_items() + else: + items = mapping.items() + for item_key, item_value in items: + item_count += 1 + node_key = self.represent_key(item_key) + node_value = self.represent_data(item_value) + item_comment = item_comments.get(item_key) + if item_comment: + # assert getattr(node_key, 
'comment', None) is None + # issue 351 did throw this because the comment from the list item was + # moved to the dict + node_key.comment = item_comment[:2] + nvc = getattr(node_value, 'comment', None) + if nvc is not None: # end comment already there + nvc[0] = item_comment[2] + nvc[1] = item_comment[3] + else: + node_value.comment = item_comment[2:] + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + if flow_style is None: + if ( + (item_count != 0) or bool(merge_list) + ) and self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + if bool(merge_list): + # because of the call to represent_data here, the anchors + # are marked as being used and thereby created + if len(merge_list) == 1: + arg = self.represent_data(merge_list[0]) + else: + arg = self.represent_data(merge_list) + arg.flow_style = True + value.insert(merge_pos, (ScalarNode('tag:yaml.org,2002:merge', '<<'), arg)) + return node + + def represent_omap(self, tag, omap, flow_style=None): + # type: (Any, Any, Any) -> Any + value = [] # type: List[Any] + try: + flow_style = omap.fa.flow_style(flow_style) + except AttributeError: + flow_style = flow_style + try: + anchor = omap.yaml_anchor() + except AttributeError: + anchor = None + node = SequenceNode(tag, value, flow_style=flow_style, anchor=anchor) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + try: + comment = getattr(omap, comment_attrib) + if node.comment is None: + node.comment = comment.comment + else: + # as we are potentially going to extend this, make a new list + node.comment = comment.comment[:] + if node.comment and node.comment[1]: + for ct in node.comment[1]: + ct.reset() + item_comments = comment.items + for v in item_comments.values(): + if v and 
v[1]: + for ct in v[1]: + ct.reset() + try: + node.comment.append(comment.end) + except AttributeError: + pass + except AttributeError: + item_comments = {} + for item_key in omap: + item_val = omap[item_key] + node_item = self.represent_data({item_key: item_val}) + # node_item.flow_style = False + # node item has two scalars in value: node_key and node_value + item_comment = item_comments.get(item_key) + if item_comment: + if item_comment[1]: + node_item.comment = [None, item_comment[1]] + assert getattr(node_item.value[0][0], 'comment', None) is None + node_item.value[0][0].comment = [item_comment[0], None] + nvc = getattr(node_item.value[0][1], 'comment', None) + if nvc is not None: # end comment already there + nvc[0] = item_comment[2] + nvc[1] = item_comment[3] + else: + node_item.value[0][1].comment = item_comment[2:] + # if not (isinstance(node_item, ScalarNode) \ + # and not node_item.style): + # best_style = False + value.append(node_item) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def represent_set(self, setting): + # type: (Any) -> Any + flow_style = False + tag = 'tag:yaml.org,2002:set' + # return self.represent_mapping(tag, value) + value = [] # type: List[Any] + flow_style = setting.fa.flow_style(flow_style) + try: + anchor = setting.yaml_anchor() + except AttributeError: + anchor = None + node = MappingNode(tag, value, flow_style=flow_style, anchor=anchor) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + # no sorting! !! 
+ try: + comment = getattr(setting, comment_attrib) + if node.comment is None: + node.comment = comment.comment + else: + # as we are potentially going to extend this, make a new list + node.comment = comment.comment[:] + if node.comment and node.comment[1]: + for ct in node.comment[1]: + ct.reset() + item_comments = comment.items + for v in item_comments.values(): + if v and v[1]: + for ct in v[1]: + ct.reset() + try: + node.comment.append(comment.end) + except AttributeError: + pass + except AttributeError: + item_comments = {} + for item_key in setting.odict: + node_key = self.represent_key(item_key) + node_value = self.represent_data(None) + item_comment = item_comments.get(item_key) + if item_comment: + assert getattr(node_key, 'comment', None) is None + node_key.comment = item_comment[:2] + node_key.style = node_value.style = '?' + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + best_style = best_style + return node + + def represent_dict(self, data): + # type: (Any) -> Any + """write out tag if saved on loading""" + try: + t = data.tag.value + except AttributeError: + t = None + if t: + if t.startswith('!!'): + tag = 'tag:yaml.org,2002:' + t[2:] + else: + tag = t + else: + tag = 'tag:yaml.org,2002:map' + return self.represent_mapping(tag, data) + + def represent_list(self, data): + # type: (Any) -> Any + try: + t = data.tag.value + except AttributeError: + t = None + if t: + if t.startswith('!!'): + tag = 'tag:yaml.org,2002:' + t[2:] + else: + tag = t + else: + tag = 'tag:yaml.org,2002:seq' + return self.represent_sequence(tag, data) + + def represent_datetime(self, data): + # type: (Any) -> Any + inter = 'T' if data._yaml['t'] else ' ' + _yaml = data._yaml + if _yaml['delta']: + data += _yaml['delta'] + value = data.isoformat(inter) + else: + value = data.isoformat(inter) + if 
_yaml['tz']: + value += _yaml['tz'] + return self.represent_scalar('tag:yaml.org,2002:timestamp', value) + + def represent_tagged_scalar(self, data): + # type: (Any) -> Any + try: + tag = data.tag.value + except AttributeError: + tag = None + try: + anchor = data.yaml_anchor() + except AttributeError: + anchor = None + return self.represent_scalar(tag, data.value, style=data.style, anchor=anchor) + + def represent_scalar_bool(self, data): + # type: (Any) -> Any + try: + anchor = data.yaml_anchor() + except AttributeError: + anchor = None + return SafeRepresenter.represent_bool(self, data, anchor=anchor) + + def represent_yaml_object(self, tag, data, cls, flow_style=None): + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__.copy() + anchor = state.pop(Anchor.attrib, None) + res = self.represent_mapping(tag, state, flow_style=flow_style) + if anchor is not None: + res.anchor = anchor + return res + + +RoundTripRepresenter.add_representer(type(None), RoundTripRepresenter.represent_none) + +RoundTripRepresenter.add_representer( + LiteralScalarString, RoundTripRepresenter.represent_literal_scalarstring +) + +RoundTripRepresenter.add_representer( + FoldedScalarString, RoundTripRepresenter.represent_folded_scalarstring +) + +RoundTripRepresenter.add_representer( + SingleQuotedScalarString, RoundTripRepresenter.represent_single_quoted_scalarstring +) + +RoundTripRepresenter.add_representer( + DoubleQuotedScalarString, RoundTripRepresenter.represent_double_quoted_scalarstring +) + +RoundTripRepresenter.add_representer( + PlainScalarString, RoundTripRepresenter.represent_plain_scalarstring +) + +# RoundTripRepresenter.add_representer(tuple, Representer.represent_tuple) + +RoundTripRepresenter.add_representer( + ScalarInt, RoundTripRepresenter.represent_scalar_int +) + +RoundTripRepresenter.add_representer( + BinaryInt, RoundTripRepresenter.represent_binary_int +) + +RoundTripRepresenter.add_representer(OctalInt, 
RoundTripRepresenter.represent_octal_int) + +RoundTripRepresenter.add_representer(HexInt, RoundTripRepresenter.represent_hex_int) + +RoundTripRepresenter.add_representer( + HexCapsInt, RoundTripRepresenter.represent_hex_caps_int +) + +RoundTripRepresenter.add_representer( + ScalarFloat, RoundTripRepresenter.represent_scalar_float +) + +RoundTripRepresenter.add_representer( + ScalarBoolean, RoundTripRepresenter.represent_scalar_bool +) + +RoundTripRepresenter.add_representer(CommentedSeq, RoundTripRepresenter.represent_list) + +RoundTripRepresenter.add_representer(CommentedMap, RoundTripRepresenter.represent_dict) + +RoundTripRepresenter.add_representer( + CommentedOrderedMap, RoundTripRepresenter.represent_ordereddict +) + +RoundTripRepresenter.add_representer( + OrderedDict, RoundTripRepresenter.represent_ordereddict +) + +RoundTripRepresenter.add_representer(CommentedSet, RoundTripRepresenter.represent_set) + +RoundTripRepresenter.add_representer( + TaggedScalar, RoundTripRepresenter.represent_tagged_scalar +) + +RoundTripRepresenter.add_representer(TimeStamp, RoundTripRepresenter.represent_datetime) diff --git a/lib/ruyaml/resolver.py b/lib/ruyaml/resolver.py new file mode 100644 index 0000000..24ae73f --- /dev/null +++ b/lib/ruyaml/resolver.py @@ -0,0 +1,421 @@ +# coding: utf-8 + +import re +from typing import Any, Dict, List, Optional, Text, Union # NOQA + +if False: # MYPY + from typing import Any, Dict, List, Union, Text, Optional # NOQA + from ruyaml.compat import VersionType # NOQA + +from ruyaml.compat import _DEFAULT_YAML_VERSION, _F # NOQA +from ruyaml.error import * # NOQA +from ruyaml.nodes import MappingNode, ScalarNode, SequenceNode # NOQA +from ruyaml.util import RegExp # NOQA + +__all__ = ['BaseResolver', 'Resolver', 'VersionedResolver'] + + +# fmt: off +# resolvers consist of +# - a list of applicable version +# - a tag +# - a regexp +# - a list of first characters to match +implicit_resolvers = [ + ([(1, 2)], + 'tag:yaml.org,2002:bool', + 
RegExp('''^(?:true|True|TRUE|false|False|FALSE)$''', re.X), + list('tTfF')), + ([(1, 1)], + 'tag:yaml.org,2002:bool', + RegExp('''^(?:y|Y|yes|Yes|YES|n|N|no|No|NO + |true|True|TRUE|false|False|FALSE + |on|On|ON|off|Off|OFF)$''', re.X), + list('yYnNtTfFoO')), + ([(1, 2)], + 'tag:yaml.org,2002:float', + RegExp('''^(?: + [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)? + |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+) + |[-+]?\\.[0-9_]+(?:[eE][-+][0-9]+)? + |[-+]?\\.(?:inf|Inf|INF) + |\\.(?:nan|NaN|NAN))$''', re.X), + list('-+0123456789.')), + ([(1, 1)], + 'tag:yaml.org,2002:float', + RegExp('''^(?: + [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)? + |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+) + |\\.[0-9_]+(?:[eE][-+][0-9]+)? + |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]* # sexagesimal float + |[-+]?\\.(?:inf|Inf|INF) + |\\.(?:nan|NaN|NAN))$''', re.X), + list('-+0123456789.')), + ([(1, 2)], + 'tag:yaml.org,2002:int', + RegExp('''^(?:[-+]?0b[0-1_]+ + |[-+]?0o?[0-7_]+ + |[-+]?[0-9_]+ + |[-+]?0x[0-9a-fA-F_]+)$''', re.X), + list('-+0123456789')), + ([(1, 1)], + 'tag:yaml.org,2002:int', + RegExp('''^(?:[-+]?0b[0-1_]+ + |[-+]?0?[0-7_]+ + |[-+]?(?:0|[1-9][0-9_]*) + |[-+]?0x[0-9a-fA-F_]+ + |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), # sexagesimal int + list('-+0123456789')), + ([(1, 2), (1, 1)], + 'tag:yaml.org,2002:merge', + RegExp('^(?:<<)$'), + ['<']), + ([(1, 2), (1, 1)], + 'tag:yaml.org,2002:null', + RegExp('''^(?: ~ + |null|Null|NULL + | )$''', re.X), + ['~', 'n', 'N', '']), + ([(1, 2), (1, 1)], + 'tag:yaml.org,2002:timestamp', + RegExp('''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] + |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? + (?:[Tt]|[ \\t]+)[0-9][0-9]? + :[0-9][0-9] :[0-9][0-9] (?:\\.[0-9]*)? + (?:[ \\t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), + list('0123456789')), + ([(1, 2), (1, 1)], + 'tag:yaml.org,2002:value', + RegExp('^(?:=)$'), + ['=']), + # The following resolver is only for documentation purposes. 
It cannot work + # because plain scalars cannot start with '!', '&', or '*'. + ([(1, 2), (1, 1)], + 'tag:yaml.org,2002:yaml', + RegExp('^(?:!|&|\\*)$'), + list('!&*')), +] +# fmt: on + + +class ResolverError(YAMLError): + pass + + +class BaseResolver: + + DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str' + DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq' + DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map' + + yaml_implicit_resolvers = {} # type: Dict[Any, Any] + yaml_path_resolvers = {} # type: Dict[Any, Any] + + def __init__(self, loadumper=None): + # type: (Any, Any) -> None + self.loadumper = loadumper + if ( + self.loadumper is not None + and getattr(self.loadumper, '_resolver', None) is None + ): + self.loadumper._resolver = self.loadumper + self._loader_version = None # type: Any + self.resolver_exact_paths = [] # type: List[Any] + self.resolver_prefix_paths = [] # type: List[Any] + + @property + def parser(self): + # type: () -> Any + if self.loadumper is not None: + if hasattr(self.loadumper, 'typ'): + return self.loadumper.parser + return self.loadumper._parser + return None + + @classmethod + def add_implicit_resolver_base(cls, tag, regexp, first): + # type: (Any, Any, Any) -> None + if 'yaml_implicit_resolvers' not in cls.__dict__: + # deepcopy doesn't work here + cls.yaml_implicit_resolvers = dict( + (k, cls.yaml_implicit_resolvers[k][:]) + for k in cls.yaml_implicit_resolvers + ) + if first is None: + first = [None] + for ch in first: + cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) + + @classmethod + def add_implicit_resolver(cls, tag, regexp, first): + # type: (Any, Any, Any) -> None + if 'yaml_implicit_resolvers' not in cls.__dict__: + # deepcopy doesn't work here + cls.yaml_implicit_resolvers = dict( + (k, cls.yaml_implicit_resolvers[k][:]) + for k in cls.yaml_implicit_resolvers + ) + if first is None: + first = [None] + for ch in first: + cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) + 
implicit_resolvers.append(([(1, 2), (1, 1)], tag, regexp, first)) + + # @classmethod + # def add_implicit_resolver(cls, tag, regexp, first): + + @classmethod + def add_path_resolver(cls, tag, path, kind=None): + # type: (Any, Any, Any) -> None + # Note: `add_path_resolver` is experimental. The API could be changed. + # `new_path` is a pattern that is matched against the path from the + # root to the node that is being considered. `node_path` elements are + # tuples `(node_check, index_check)`. `node_check` is a node class: + # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` + # matches any kind of a node. `index_check` could be `None`, a boolean + # value, a string value, or a number. `None` and `False` match against + # any _value_ of sequence and mapping nodes. `True` matches against + # any _key_ of a mapping node. A string `index_check` matches against + # a mapping value that corresponds to a scalar key which content is + # equal to the `index_check` value. An integer `index_check` matches + # against a sequence value with the index equal to `index_check`. 
+ if 'yaml_path_resolvers' not in cls.__dict__: + cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() + new_path = [] # type: List[Any] + for element in path: + if isinstance(element, (list, tuple)): + if len(element) == 2: + node_check, index_check = element + elif len(element) == 1: + node_check = element[0] + index_check = True + else: + raise ResolverError( + _F('Invalid path element: {element!s}', element=element) + ) + else: + node_check = None + index_check = element + if node_check is str: + node_check = ScalarNode + elif node_check is list: + node_check = SequenceNode + elif node_check is dict: + node_check = MappingNode + elif ( + node_check not in [ScalarNode, SequenceNode, MappingNode] + and not isinstance(node_check, str) + and node_check is not None + ): + raise ResolverError( + _F('Invalid node checker: {node_check!s}', node_check=node_check) + ) + if not isinstance(index_check, (str, int)) and index_check is not None: + raise ResolverError( + _F( + 'Invalid index checker: {index_check!s}', + index_check=index_check, + ) + ) + new_path.append((node_check, index_check)) + if kind is str: + kind = ScalarNode + elif kind is list: + kind = SequenceNode + elif kind is dict: + kind = MappingNode + elif kind not in [ScalarNode, SequenceNode, MappingNode] and kind is not None: + raise ResolverError(_F('Invalid node kind: {kind!s}', kind=kind)) + cls.yaml_path_resolvers[tuple(new_path), kind] = tag + + def descend_resolver(self, current_node, current_index): + # type: (Any, Any) -> None + if not self.yaml_path_resolvers: + return + exact_paths = {} + prefix_paths = [] + if current_node: + depth = len(self.resolver_prefix_paths) + for path, kind in self.resolver_prefix_paths[-1]: + if self.check_resolver_prefix( + depth, path, kind, current_node, current_index + ): + if len(path) > depth: + prefix_paths.append((path, kind)) + else: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + for path, kind in self.yaml_path_resolvers: + if not 
path: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + prefix_paths.append((path, kind)) + self.resolver_exact_paths.append(exact_paths) + self.resolver_prefix_paths.append(prefix_paths) + + def ascend_resolver(self): + # type: () -> None + if not self.yaml_path_resolvers: + return + self.resolver_exact_paths.pop() + self.resolver_prefix_paths.pop() + + def check_resolver_prefix(self, depth, path, kind, current_node, current_index): + # type: (int, Any, Any, Any, Any) -> bool + node_check, index_check = path[depth - 1] + if isinstance(node_check, str): + if current_node.tag != node_check: + return False + elif node_check is not None: # type: ignore + if not isinstance(current_node, node_check): # type: ignore + return False + if index_check is True and current_index is not None: # type: ignore + return False + if ( + index_check is False or index_check is None # type: ignore + ) and current_index is None: # type: ignore + return False + if isinstance(index_check, str): + if not ( + isinstance(current_index, ScalarNode) + and index_check == current_index.value # type: ignore + ): + return False + elif isinstance(index_check, int) and not isinstance( # type: ignore + index_check, bool # type: ignore + ): + if index_check != current_index: # type: ignore + return False + return True + + def resolve(self, kind, value, implicit): + # type: (Any, Any, Any) -> Any + if kind is ScalarNode and implicit[0]: + if value == "": + resolvers = self.yaml_implicit_resolvers.get("", []) + else: + resolvers = self.yaml_implicit_resolvers.get(value[0], []) + resolvers += self.yaml_implicit_resolvers.get(None, []) + for tag, regexp in resolvers: + if regexp.match(value): + return tag + implicit = implicit[1] + if bool(self.yaml_path_resolvers): + exact_paths = self.resolver_exact_paths[-1] + if kind in exact_paths: + return exact_paths[kind] + if None in exact_paths: + return exact_paths[None] + if kind is ScalarNode: + return self.DEFAULT_SCALAR_TAG + elif kind is 
SequenceNode: + return self.DEFAULT_SEQUENCE_TAG + elif kind is MappingNode: + return self.DEFAULT_MAPPING_TAG + + @property + def processing_version(self): + # type: () -> Any + return None + + +class Resolver(BaseResolver): + pass + + +for ir in implicit_resolvers: + if (1, 2) in ir[0]: + Resolver.add_implicit_resolver_base(*ir[1:]) + + +class VersionedResolver(BaseResolver): + """ + contrary to the "normal" resolver, the smart resolver delays loading + the pattern matching rules. That way it can decide to load 1.1 rules + or the (default) 1.2 rules, that no longer support octal without 0o, sexagesimals + and Yes/No/On/Off booleans. + """ + + def __init__(self, version=None, loader=None, loadumper=None): + # type: (Optional[VersionType], Any, Any) -> None + if loader is None and loadumper is not None: + loader = loadumper + BaseResolver.__init__(self, loader) + self._loader_version = self.get_loader_version(version) + self._version_implicit_resolver = {} # type: Dict[Any, Any] + + def add_version_implicit_resolver(self, version, tag, regexp, first): + # type: (VersionType, Any, Any, Any) -> None + if first is None: + first = [None] + impl_resolver = self._version_implicit_resolver.setdefault(version, {}) + for ch in first: + impl_resolver.setdefault(ch, []).append((tag, regexp)) + + def get_loader_version(self, version): + # type: (Optional[VersionType]) -> Any + if version is None or isinstance(version, tuple): + return version + if isinstance(version, list): + return tuple(version) + # assume string + return tuple(map(int, version.split('.'))) + + @property + def versioned_resolver(self): + # type: () -> Any + """ + select the resolver based on the version we are parsing + """ + version = self.processing_version + if isinstance(version, str): + version = tuple(map(int, version.split('.'))) + if version not in self._version_implicit_resolver: + for x in implicit_resolvers: + if version in x[0]: + self.add_version_implicit_resolver(version, x[1], x[2], x[3]) + 
return self._version_implicit_resolver[version] + + def resolve(self, kind, value, implicit): + # type: (Any, Any, Any) -> Any + if kind is ScalarNode and implicit[0]: + if value == "": + resolvers = self.versioned_resolver.get("", []) + else: + resolvers = self.versioned_resolver.get(value[0], []) + resolvers += self.versioned_resolver.get(None, []) + for tag, regexp in resolvers: + if regexp.match(value): + return tag + implicit = implicit[1] + if bool(self.yaml_path_resolvers): + exact_paths = self.resolver_exact_paths[-1] + if kind in exact_paths: + return exact_paths[kind] + if None in exact_paths: + return exact_paths[None] + if kind is ScalarNode: + return self.DEFAULT_SCALAR_TAG + elif kind is SequenceNode: + return self.DEFAULT_SEQUENCE_TAG + elif kind is MappingNode: + return self.DEFAULT_MAPPING_TAG + + @property + def processing_version(self): + # type: () -> Any + try: + version = self.loadumper._scanner.yaml_version # type: ignore + except AttributeError: + try: + if hasattr(self.loadumper, 'typ'): + version = self.loadumper.version # type: ignore + else: + version = self.loadumper._serializer.use_version # type: ignore # dumping + except AttributeError: + version = None + if version is None: + version = self._loader_version + if version is None: + version = _DEFAULT_YAML_VERSION + return version diff --git a/lib/ruyaml/scalarbool.py b/lib/ruyaml/scalarbool.py new file mode 100644 index 0000000..8cae835 --- /dev/null +++ b/lib/ruyaml/scalarbool.py @@ -0,0 +1,47 @@ +# coding: utf-8 + +""" +You cannot subclass bool, and this is necessary for round-tripping anchored +bool values (and also if you want to preserve the original way of writing) + +bool.__bases__ is type 'int', so that is what is used as the basis for ScalarBoolean as well. 
+ +You can use these in an if statement, but not when testing equivalence +""" + +from ruyaml.anchor import Anchor + +if False: # MYPY + from typing import Any, Dict, List, Text # NOQA + +__all__ = ['ScalarBoolean'] + + +class ScalarBoolean(int): + def __new__(cls, *args, **kw): + # type: (Any, Any, Any) -> Any + anchor = kw.pop('anchor', None) + b = int.__new__(cls, *args, **kw) + if anchor is not None: + b.yaml_set_anchor(anchor, always_dump=True) + return b + + @property + def anchor(self): + # type: () -> Any + if not hasattr(self, Anchor.attrib): + setattr(self, Anchor.attrib, Anchor()) + return getattr(self, Anchor.attrib) + + def yaml_anchor(self, any=False): + # type: (bool) -> Any + if not hasattr(self, Anchor.attrib): + return None + if any or self.anchor.always_dump: + return self.anchor + return None + + def yaml_set_anchor(self, value, always_dump=False): + # type: (Any, bool) -> None + self.anchor.value = value + self.anchor.always_dump = always_dump diff --git a/lib/ruyaml/scalarfloat.py b/lib/ruyaml/scalarfloat.py new file mode 100644 index 0000000..a9e5a18 --- /dev/null +++ b/lib/ruyaml/scalarfloat.py @@ -0,0 +1,135 @@ +# coding: utf-8 + +import sys + +from ruyaml.anchor import Anchor + +if False: # MYPY + from typing import Any, Dict, List, Text # NOQA + +__all__ = ['ScalarFloat', 'ExponentialFloat', 'ExponentialCapsFloat'] + + +class ScalarFloat(float): + def __new__(cls, *args, **kw): + # type: (Any, Any, Any) -> Any + width = kw.pop('width', None) + prec = kw.pop('prec', None) + m_sign = kw.pop('m_sign', None) + m_lead0 = kw.pop('m_lead0', 0) + exp = kw.pop('exp', None) + e_width = kw.pop('e_width', None) + e_sign = kw.pop('e_sign', None) + underscore = kw.pop('underscore', None) + anchor = kw.pop('anchor', None) + v = float.__new__(cls, *args, **kw) + v._width = width + v._prec = prec + v._m_sign = m_sign + v._m_lead0 = m_lead0 + v._exp = exp + v._e_width = e_width + v._e_sign = e_sign + v._underscore = underscore + if anchor is not None: + 
v.yaml_set_anchor(anchor, always_dump=True) + return v + + def __iadd__(self, a): # type: ignore + # type: (Any) -> Any + return float(self) + a + x = type(self)(self + a) + x._width = self._width + x._underscore = ( + self._underscore[:] if self._underscore is not None else None + ) # NOQA + return x + + def __ifloordiv__(self, a): # type: ignore + # type: (Any) -> Any + return float(self) // a + x = type(self)(self // a) + x._width = self._width + x._underscore = ( + self._underscore[:] if self._underscore is not None else None + ) # NOQA + return x + + def __imul__(self, a): # type: ignore + # type: (Any) -> Any + return float(self) * a + x = type(self)(self * a) + x._width = self._width + x._underscore = ( + self._underscore[:] if self._underscore is not None else None + ) # NOQA + x._prec = self._prec # check for others + return x + + def __ipow__(self, a): # type: ignore + # type: (Any) -> Any + return float(self) ** a + x = type(self)(self ** a) + x._width = self._width + x._underscore = ( + self._underscore[:] if self._underscore is not None else None + ) # NOQA + return x + + def __isub__(self, a): # type: ignore + # type: (Any) -> Any + return float(self) - a + x = type(self)(self - a) + x._width = self._width + x._underscore = ( + self._underscore[:] if self._underscore is not None else None + ) # NOQA + return x + + @property + def anchor(self): + # type: () -> Any + if not hasattr(self, Anchor.attrib): + setattr(self, Anchor.attrib, Anchor()) + return getattr(self, Anchor.attrib) + + def yaml_anchor(self, any=False): + # type: (bool) -> Any + if not hasattr(self, Anchor.attrib): + return None + if any or self.anchor.always_dump: + return self.anchor + return None + + def yaml_set_anchor(self, value, always_dump=False): + # type: (Any, bool) -> None + self.anchor.value = value + self.anchor.always_dump = always_dump + + def dump(self, out=sys.stdout): + # type: (Any) -> Any + out.write( + 'ScalarFloat({}| w:{}, p:{}, s:{}, lz:{}, _:{}|{}, w:{}, 
s:{})\n'.format( + self, + self._width, # type: ignore + self._prec, # type: ignore + self._m_sign, # type: ignore + self._m_lead0, # type: ignore + self._underscore, # type: ignore + self._exp, # type: ignore + self._e_width, # type: ignore + self._e_sign, # type: ignore + ) + ) + + +class ExponentialFloat(ScalarFloat): + def __new__(cls, value, width=None, underscore=None): + # type: (Any, Any, Any) -> Any + return ScalarFloat.__new__(cls, value, width=width, underscore=underscore) + + +class ExponentialCapsFloat(ScalarFloat): + def __new__(cls, value, width=None, underscore=None): + # type: (Any, Any, Any) -> Any + return ScalarFloat.__new__(cls, value, width=width, underscore=underscore) diff --git a/lib/ruyaml/scalarint.py b/lib/ruyaml/scalarint.py new file mode 100644 index 0000000..f302117 --- /dev/null +++ b/lib/ruyaml/scalarint.py @@ -0,0 +1,137 @@ +# coding: utf-8 + +from ruyaml.anchor import Anchor + +if False: # MYPY + from typing import Any, Dict, List, Text # NOQA + +__all__ = ['ScalarInt', 'BinaryInt', 'OctalInt', 'HexInt', 'HexCapsInt', 'DecimalInt'] + + +class ScalarInt(int): + def __new__(cls, *args, **kw): + # type: (Any, Any, Any) -> Any + width = kw.pop('width', None) + underscore = kw.pop('underscore', None) + anchor = kw.pop('anchor', None) + v = int.__new__(cls, *args, **kw) + v._width = width + v._underscore = underscore + if anchor is not None: + v.yaml_set_anchor(anchor, always_dump=True) + return v + + def __iadd__(self, a): # type: ignore + # type: (Any) -> Any + x = type(self)(self + a) + x._width = self._width # type: ignore + x._underscore = ( # type: ignore + self._underscore[:] if self._underscore is not None else None # type: ignore + ) # NOQA + return x + + def __ifloordiv__(self, a): # type: ignore + # type: (Any) -> Any + x = type(self)(self // a) + x._width = self._width # type: ignore + x._underscore = ( # type: ignore + self._underscore[:] if self._underscore is not None else None # type: ignore + ) # NOQA + return x + + def 
__imul__(self, a): # type: ignore + # type: (Any) -> Any + x = type(self)(self * a) + x._width = self._width # type: ignore + x._underscore = ( # type: ignore + self._underscore[:] if self._underscore is not None else None # type: ignore + ) # NOQA + return x + + def __ipow__(self, a): # type: ignore + # type: (Any) -> Any + x = type(self)(self ** a) + x._width = self._width # type: ignore + x._underscore = ( # type: ignore + self._underscore[:] if self._underscore is not None else None # type: ignore + ) # NOQA + return x + + def __isub__(self, a): # type: ignore + # type: (Any) -> Any + x = type(self)(self - a) + x._width = self._width # type: ignore + x._underscore = ( # type: ignore + self._underscore[:] if self._underscore is not None else None # type: ignore + ) # NOQA + return x + + @property + def anchor(self): + # type: () -> Any + if not hasattr(self, Anchor.attrib): + setattr(self, Anchor.attrib, Anchor()) + return getattr(self, Anchor.attrib) + + def yaml_anchor(self, any=False): + # type: (bool) -> Any + if not hasattr(self, Anchor.attrib): + return None + if any or self.anchor.always_dump: + return self.anchor + return None + + def yaml_set_anchor(self, value, always_dump=False): + # type: (Any, bool) -> None + self.anchor.value = value + self.anchor.always_dump = always_dump + + +class BinaryInt(ScalarInt): + def __new__(cls, value, width=None, underscore=None, anchor=None): + # type: (Any, Any, Any, Any) -> Any + return ScalarInt.__new__( + cls, value, width=width, underscore=underscore, anchor=anchor + ) + + +class OctalInt(ScalarInt): + def __new__(cls, value, width=None, underscore=None, anchor=None): + # type: (Any, Any, Any, Any) -> Any + return ScalarInt.__new__( + cls, value, width=width, underscore=underscore, anchor=anchor + ) + + +# mixed casing of A-F is not supported, when loading the first non digit +# determines the case + + +class HexInt(ScalarInt): + """uses lower case (a-f)""" + + def __new__(cls, value, width=None, underscore=None, 
anchor=None): + # type: (Any, Any, Any, Any) -> Any + return ScalarInt.__new__( + cls, value, width=width, underscore=underscore, anchor=anchor + ) + + +class HexCapsInt(ScalarInt): + """uses upper case (A-F)""" + + def __new__(cls, value, width=None, underscore=None, anchor=None): + # type: (Any, Any, Any, Any) -> Any + return ScalarInt.__new__( + cls, value, width=width, underscore=underscore, anchor=anchor + ) + + +class DecimalInt(ScalarInt): + """needed if anchor""" + + def __new__(cls, value, width=None, underscore=None, anchor=None): + # type: (Any, Any, Any, Any) -> Any + return ScalarInt.__new__( + cls, value, width=width, underscore=underscore, anchor=anchor + ) diff --git a/lib/ruyaml/scalarstring.py b/lib/ruyaml/scalarstring.py new file mode 100644 index 0000000..3695599 --- /dev/null +++ b/lib/ruyaml/scalarstring.py @@ -0,0 +1,152 @@ +# coding: utf-8 + +from ruyaml.anchor import Anchor + +if False: # MYPY + from typing import Any, Dict, List, Text # NOQA + +__all__ = [ + 'ScalarString', + 'LiteralScalarString', + 'FoldedScalarString', + 'SingleQuotedScalarString', + 'DoubleQuotedScalarString', + 'PlainScalarString', + # PreservedScalarString is the old name, as it was the first to be preserved on rt, + # use LiteralScalarString instead + 'PreservedScalarString', +] + + +class ScalarString(str): + __slots__ = Anchor.attrib + + def __new__(cls, *args, **kw): + # type: (Any, Any) -> Any + anchor = kw.pop('anchor', None) + ret_val = str.__new__(cls, *args, **kw) + if anchor is not None: + ret_val.yaml_set_anchor(anchor, always_dump=True) + return ret_val + + def replace(self, old, new, maxreplace=-1): + # type: (Any, Any, int) -> Any + return type(self)((str.replace(self, old, new, maxreplace))) + + @property + def anchor(self): + # type: () -> Any + if not hasattr(self, Anchor.attrib): + setattr(self, Anchor.attrib, Anchor()) + return getattr(self, Anchor.attrib) + + def yaml_anchor(self, any=False): + # type: (bool) -> Any + if not hasattr(self, 
Anchor.attrib): + return None + if any or self.anchor.always_dump: + return self.anchor + return None + + def yaml_set_anchor(self, value, always_dump=False): + # type: (Any, bool) -> None + self.anchor.value = value + self.anchor.always_dump = always_dump + + +class LiteralScalarString(ScalarString): + __slots__ = 'comment' # the comment after the | on the first line + + style = '|' + + def __new__(cls, value, anchor=None): + # type: (Text, Any) -> Any + return ScalarString.__new__(cls, value, anchor=anchor) + + +PreservedScalarString = LiteralScalarString + + +class FoldedScalarString(ScalarString): + __slots__ = ('fold_pos', 'comment') # the comment after the > on the first line + + style = '>' + + def __new__(cls, value, anchor=None): + # type: (Text, Any) -> Any + return ScalarString.__new__(cls, value, anchor=anchor) + + +class SingleQuotedScalarString(ScalarString): + __slots__ = () + + style = "'" + + def __new__(cls, value, anchor=None): + # type: (Text, Any) -> Any + return ScalarString.__new__(cls, value, anchor=anchor) + + +class DoubleQuotedScalarString(ScalarString): + __slots__ = () + + style = '"' + + def __new__(cls, value, anchor=None): + # type: (Text, Any) -> Any + return ScalarString.__new__(cls, value, anchor=anchor) + + +class PlainScalarString(ScalarString): + __slots__ = () + + style = '' + + def __new__(cls, value, anchor=None): + # type: (Text, Any) -> Any + return ScalarString.__new__(cls, value, anchor=anchor) + + +def preserve_literal(s): + # type: (Text) -> Text + return LiteralScalarString(s.replace('\r\n', '\n').replace('\r', '\n')) + + +def walk_tree(base, map=None): + # type: (Any, Any) -> None + """ + the routine here walks over a simple yaml tree (recursing in + dict values and list items) and converts strings that + have multiple lines to literal scalars + + You can also provide an explicit (ordered) mapping for multiple transforms + (first of which is executed): + map = ruyaml.compat.ordereddict + map['\n'] = preserve_literal 
+ map[':'] = SingleQuotedScalarString + walk_tree(data, map=map) + """ + from collections.abc import MutableMapping, MutableSequence + + if map is None: + map = {'\n': preserve_literal} + + if isinstance(base, MutableMapping): + for k in base: + v = base[k] # type: Text + if isinstance(v, str): + for ch in map: + if ch in v: + base[k] = map[ch](v) + break + else: + walk_tree(v, map=map) + elif isinstance(base, MutableSequence): + for idx, elem in enumerate(base): + if isinstance(elem, str): + for ch in map: + if ch in elem: + base[idx] = map[ch](elem) + break + else: + walk_tree(elem, map=map) diff --git a/lib/ruyaml/scanner.py b/lib/ruyaml/scanner.py new file mode 100644 index 0000000..a5a81dc --- /dev/null +++ b/lib/ruyaml/scanner.py @@ -0,0 +1,2491 @@ +# coding: utf-8 + +# Scanner produces tokens of the following types: +# STREAM-START +# STREAM-END +# DIRECTIVE(name, value) +# DOCUMENT-START +# DOCUMENT-END +# BLOCK-SEQUENCE-START +# BLOCK-MAPPING-START +# BLOCK-END +# FLOW-SEQUENCE-START +# FLOW-MAPPING-START +# FLOW-SEQUENCE-END +# FLOW-MAPPING-END +# BLOCK-ENTRY +# FLOW-ENTRY +# KEY +# VALUE +# ALIAS(value) +# ANCHOR(value) +# TAG(value) +# SCALAR(value, plain, style) +# +# RoundTripScanner +# COMMENT(value) +# +# Read comments in the Scanner code for more details. 
+# + +import inspect + +from ruyaml.compat import _F, check_anchorname_char, nprint, nprintf # NOQA +from ruyaml.error import CommentMark, MarkedYAMLError # NOQA +from ruyaml.tokens import * # NOQA + +if False: # MYPY + from typing import Any, Dict, List, Optional, Text, Union # NOQA + + from ruyaml.compat import VersionType # NOQA + +__all__ = ['Scanner', 'RoundTripScanner', 'ScannerError'] + + +_THE_END = '\n\0\r\x85\u2028\u2029' +_THE_END_SPACE_TAB = ' \n\0\t\r\x85\u2028\u2029' +_SPACE_TAB = ' \t' + + +def xprintf(*args, **kw): + # type: (Any, Any) -> Any + return nprintf(*args, **kw) + pass + + +class ScannerError(MarkedYAMLError): + pass + + +class SimpleKey: + # See below simple keys treatment. + + def __init__(self, token_number, required, index, line, column, mark): + # type: (Any, Any, int, int, int, Any) -> None + self.token_number = token_number + self.required = required + self.index = index + self.line = line + self.column = column + self.mark = mark + + +class Scanner: + def __init__(self, loader=None): + # type: (Any) -> None + """Initialize the scanner.""" + # It is assumed that Scanner and Reader will have a common descendant. + # Reader do the dirty work of checking for BOM and converting the + # input data to Unicode. It also adds NUL to the end. + # + # Reader supports the following methods + # self.peek(i=0) # peek the next i-th character + # self.prefix(l=1) # peek the next l characters + # self.forward(l=1) # read the next l characters and move the pointer + + self.loader = loader + if self.loader is not None and getattr(self.loader, '_scanner', None) is None: + self.loader._scanner = self + self.reset_scanner() + self.first_time = False + self.yaml_version = None # type: Any + + @property + def flow_level(self): + # type: () -> int + return len(self.flow_context) + + def reset_scanner(self): + # type: () -> None + # Had we reached the end of the stream? 
+ self.done = False + + # flow_context is an expanding/shrinking list consisting of '{' and '[' + # for each unclosed flow context. If empty list that means block context + self.flow_context = [] # type: List[Text] + + # List of processed tokens that are not yet emitted. + self.tokens = [] # type: List[Any] + + # Add the STREAM-START token. + self.fetch_stream_start() + + # Number of tokens that were emitted through the `get_token` method. + self.tokens_taken = 0 + + # The current indentation level. + self.indent = -1 + + # Past indentation levels. + self.indents = [] # type: List[int] + + # Variables related to simple keys treatment. + + # A simple key is a key that is not denoted by the '?' indicator. + # Example of simple keys: + # --- + # block simple key: value + # ? not a simple key: + # : { flow simple key: value } + # We emit the KEY token before all keys, so when we find a potential + # simple key, we try to locate the corresponding ':' indicator. + # Simple keys should be limited to a single line and 1024 characters. + + # Can a simple key start at the current position? A simple key may + # start: + # - at the beginning of the line, not counting indentation spaces + # (in block context), + # - after '{', '[', ',' (in the flow context), + # - after '?', ':', '-' (in the block context). + # In the block context, this flag also signifies if a block collection + # may start at the current position. + self.allow_simple_key = True + + # Keep track of possible simple keys. This is a dictionary. The key + # is `flow_level`; there can be no more that one possible simple key + # for each level. The value is a SimpleKey record: + # (token_number, required, index, line, column, mark) + # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow), + # '[', or '{' tokens. 
+ self.possible_simple_keys = {} # type: Dict[Any, Any] + + @property + def reader(self): + # type: () -> Any + try: + return self._scanner_reader # type: ignore + except AttributeError: + if hasattr(self.loader, 'typ'): + self._scanner_reader = self.loader.reader # type: ignore + else: + self._scanner_reader = self.loader._reader # type: ignore + return self._scanner_reader + + @property + def scanner_processing_version(self): # prefix until un-composited + # type: () -> Any + if hasattr(self.loader, 'typ'): + return self.loader.resolver.processing_version # type: ignore + return self.loader.processing_version # type: ignore + + # Public methods. + + def check_token(self, *choices): + # type: (Any) -> bool + # Check if the next token is one of the given types. + while self.need_more_tokens(): + self.fetch_more_tokens() + if len(self.tokens) > 0: + if not choices: + return True + for choice in choices: + if isinstance(self.tokens[0], choice): + return True + return False + + def peek_token(self): + # type: () -> Any + # Return the next token, but do not delete if from the queue. + while self.need_more_tokens(): + self.fetch_more_tokens() + if len(self.tokens) > 0: + return self.tokens[0] + + def get_token(self): + # type: () -> Any + # Return the next token. + while self.need_more_tokens(): + self.fetch_more_tokens() + if len(self.tokens) > 0: + self.tokens_taken += 1 + return self.tokens.pop(0) + + # Private methods. + + def need_more_tokens(self): + # type: () -> bool + if self.done: + return False + if len(self.tokens) == 0: + return True + # The current token may be a potential simple key, so we + # need to look further. + self.stale_possible_simple_keys() + if self.next_possible_simple_key() == self.tokens_taken: + return True + return False + + def fetch_comment(self, comment): + # type: (Any) -> None + raise NotImplementedError + + def fetch_more_tokens(self): + # type: () -> Any + # Eat whitespaces and comments until we reach the next token. 
+ comment = self.scan_to_next_token() + if comment is not None: # never happens for base scanner + return self.fetch_comment(comment) + # Remove obsolete possible simple keys. + self.stale_possible_simple_keys() + + # Compare the current indentation and column. It may add some tokens + # and decrease the current indentation level. + self.unwind_indent(self.reader.column) + + # Peek the next character. + ch = self.reader.peek() + + # Is it the end of stream? + if ch == '\0': + return self.fetch_stream_end() + + # Is it a directive? + if ch == '%' and self.check_directive(): + return self.fetch_directive() + + # Is it the document start? + if ch == '-' and self.check_document_start(): + return self.fetch_document_start() + + # Is it the document end? + if ch == '.' and self.check_document_end(): + return self.fetch_document_end() + + # TODO: support for BOM within a stream. + # if ch == '\uFEFF': + # return self.fetch_bom() <-- issue BOMToken + + # Note: the order of the following checks is NOT significant. + + # Is it the flow sequence start indicator? + if ch == '[': + return self.fetch_flow_sequence_start() + + # Is it the flow mapping start indicator? + if ch == '{': + return self.fetch_flow_mapping_start() + + # Is it the flow sequence end indicator? + if ch == ']': + return self.fetch_flow_sequence_end() + + # Is it the flow mapping end indicator? + if ch == '}': + return self.fetch_flow_mapping_end() + + # Is it the flow entry indicator? + if ch == ',': + return self.fetch_flow_entry() + + # Is it the block entry indicator? + if ch == '-' and self.check_block_entry(): + return self.fetch_block_entry() + + # Is it the key indicator? + if ch == '?' and self.check_key(): + return self.fetch_key() + + # Is it the value indicator? + if ch == ':' and self.check_value(): + return self.fetch_value() + + # Is it an alias? + if ch == '*': + return self.fetch_alias() + + # Is it an anchor? + if ch == '&': + return self.fetch_anchor() + + # Is it a tag? 
+ if ch == '!': + return self.fetch_tag() + + # Is it a literal scalar? + if ch == '|' and not self.flow_level: + return self.fetch_literal() + + # Is it a folded scalar? + if ch == '>' and not self.flow_level: + return self.fetch_folded() + + # Is it a single quoted scalar? + if ch == "'": + return self.fetch_single() + + # Is it a double quoted scalar? + if ch == '"': + return self.fetch_double() + + # It must be a plain scalar then. + if self.check_plain(): + return self.fetch_plain() + + # No? It's an error. Let's produce a nice error message. + raise ScannerError( + 'while scanning for the next token', + None, + _F('found character {ch!r} that cannot start any token', ch=ch), + self.reader.get_mark(), + ) + + # Simple keys treatment. + + def next_possible_simple_key(self): + # type: () -> Any + # Return the number of the nearest possible simple key. Actually we + # don't need to loop through the whole dictionary. We may replace it + # with the following code: + # if not self.possible_simple_keys: + # return None + # return self.possible_simple_keys[ + # min(self.possible_simple_keys.keys())].token_number + min_token_number = None + for level in self.possible_simple_keys: + key = self.possible_simple_keys[level] + if min_token_number is None or key.token_number < min_token_number: + min_token_number = key.token_number + return min_token_number + + def stale_possible_simple_keys(self): + # type: () -> None + # Remove entries that are no longer possible simple keys. According to + # the YAML specification, simple keys + # - should be limited to a single line, + # - should be no longer than 1024 characters. + # Disabling this procedure will allow simple keys of any length and + # height (may cause problems if indentation is broken though). 
+ for level in list(self.possible_simple_keys): + key = self.possible_simple_keys[level] + if key.line != self.reader.line or self.reader.index - key.index > 1024: + if key.required: + raise ScannerError( + 'while scanning a simple key', + key.mark, + "could not find expected ':'", + self.reader.get_mark(), + ) + del self.possible_simple_keys[level] + + def save_possible_simple_key(self): + # type: () -> None + # The next token may start a simple key. We check if it's possible + # and save its position. This function is called for + # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'. + + # Check if a simple key is required at the current position. + required = not self.flow_level and self.indent == self.reader.column + + # The next token might be a simple key. Let's save it's number and + # position. + if self.allow_simple_key: + self.remove_possible_simple_key() + token_number = self.tokens_taken + len(self.tokens) + key = SimpleKey( + token_number, + required, + self.reader.index, + self.reader.line, + self.reader.column, + self.reader.get_mark(), + ) + self.possible_simple_keys[self.flow_level] = key + + def remove_possible_simple_key(self): + # type: () -> None + # Remove the saved possible key position at the current flow level. + if self.flow_level in self.possible_simple_keys: + key = self.possible_simple_keys[self.flow_level] + + if key.required: + raise ScannerError( + 'while scanning a simple key', + key.mark, + "could not find expected ':'", + self.reader.get_mark(), + ) + + del self.possible_simple_keys[self.flow_level] + + # Indentation functions. + + def unwind_indent(self, column): + # type: (Any) -> None + # In flow context, tokens should respect indentation. + # Actually the condition should be `self.indent >= column` according to + # the spec. 
But this condition will prohibit intuitively correct + # constructions such as + # key : { + # } + # #### + # if self.flow_level and self.indent > column: + # raise ScannerError(None, None, + # "invalid intendation or unclosed '[' or '{'", + # self.reader.get_mark()) + + # In the flow context, indentation is ignored. We make the scanner less + # restrictive then specification requires. + if bool(self.flow_level): + return + + # In block context, we may need to issue the BLOCK-END tokens. + while self.indent > column: + mark = self.reader.get_mark() + self.indent = self.indents.pop() + self.tokens.append(BlockEndToken(mark, mark)) + + def add_indent(self, column): + # type: (int) -> bool + # Check if we need to increase indentation. + if self.indent < column: + self.indents.append(self.indent) + self.indent = column + return True + return False + + # Fetchers. + + def fetch_stream_start(self): + # type: () -> None + # We always add STREAM-START as the first token and STREAM-END as the + # last token. + # Read the token. + mark = self.reader.get_mark() + # Add STREAM-START. + self.tokens.append(StreamStartToken(mark, mark, encoding=self.reader.encoding)) + + def fetch_stream_end(self): + # type: () -> None + # Set the current intendation to -1. + self.unwind_indent(-1) + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + self.possible_simple_keys = {} + # Read the token. + mark = self.reader.get_mark() + # Add STREAM-END. + self.tokens.append(StreamEndToken(mark, mark)) + # The steam is finished. + self.done = True + + def fetch_directive(self): + # type: () -> None + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Scan and add DIRECTIVE. 
+ self.tokens.append(self.scan_directive()) + + def fetch_document_start(self): + # type: () -> None + self.fetch_document_indicator(DocumentStartToken) + + def fetch_document_end(self): + # type: () -> None + self.fetch_document_indicator(DocumentEndToken) + + def fetch_document_indicator(self, TokenClass): + # type: (Any) -> None + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. Note that there could not be a block collection + # after '---'. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Add DOCUMENT-START or DOCUMENT-END. + start_mark = self.reader.get_mark() + self.reader.forward(3) + end_mark = self.reader.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_start(self): + # type: () -> None + self.fetch_flow_collection_start(FlowSequenceStartToken, to_push='[') + + def fetch_flow_mapping_start(self): + # type: () -> None + self.fetch_flow_collection_start(FlowMappingStartToken, to_push='{') + + def fetch_flow_collection_start(self, TokenClass, to_push): + # type: (Any, Text) -> None + # '[' and '{' may start a simple key. + self.save_possible_simple_key() + # Increase the flow level. + self.flow_context.append(to_push) + # Simple keys are allowed after '[' and '{'. + self.allow_simple_key = True + # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START. + start_mark = self.reader.get_mark() + self.reader.forward() + end_mark = self.reader.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_end(self): + # type: () -> None + self.fetch_flow_collection_end(FlowSequenceEndToken) + + def fetch_flow_mapping_end(self): + # type: () -> None + self.fetch_flow_collection_end(FlowMappingEndToken) + + def fetch_flow_collection_end(self, TokenClass): + # type: (Any) -> None + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + # Decrease the flow level. 
+ try: + popped = self.flow_context.pop() # NOQA + except IndexError: + # We must not be in a list or object. + # Defer error handling to the parser. + pass + # No simple keys after ']' or '}'. + self.allow_simple_key = False + # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END. + start_mark = self.reader.get_mark() + self.reader.forward() + end_mark = self.reader.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_entry(self): + # type: () -> None + # Simple keys are allowed after ','. + self.allow_simple_key = True + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + # Add FLOW-ENTRY. + start_mark = self.reader.get_mark() + self.reader.forward() + end_mark = self.reader.get_mark() + self.tokens.append(FlowEntryToken(start_mark, end_mark)) + + def fetch_block_entry(self): + # type: () -> None + # Block context needs additional checks. + if not self.flow_level: + # Are we allowed to start a new entry? + if not self.allow_simple_key: + raise ScannerError( + None, + None, + 'sequence entries are not allowed here', + self.reader.get_mark(), + ) + # We may need to add BLOCK-SEQUENCE-START. + if self.add_indent(self.reader.column): + mark = self.reader.get_mark() + self.tokens.append(BlockSequenceStartToken(mark, mark)) + # It's an error for the block entry to occur in the flow context, + # but we let the parser detect this. + else: + pass + # Simple keys are allowed after '-'. + self.allow_simple_key = True + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add BLOCK-ENTRY. + start_mark = self.reader.get_mark() + self.reader.forward() + end_mark = self.reader.get_mark() + self.tokens.append(BlockEntryToken(start_mark, end_mark)) + + def fetch_key(self): + # type: () -> None + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a key (not nessesary a simple)? 
+ if not self.allow_simple_key: + raise ScannerError( + None, + None, + 'mapping keys are not allowed here', + self.reader.get_mark(), + ) + + # We may need to add BLOCK-MAPPING-START. + if self.add_indent(self.reader.column): + mark = self.reader.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after '?' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add KEY. + start_mark = self.reader.get_mark() + self.reader.forward() + end_mark = self.reader.get_mark() + self.tokens.append(KeyToken(start_mark, end_mark)) + + def fetch_value(self): + # type: () -> None + # Do we determine a simple key? + if self.flow_level in self.possible_simple_keys: + # Add KEY. + key = self.possible_simple_keys[self.flow_level] + del self.possible_simple_keys[self.flow_level] + self.tokens.insert( + key.token_number - self.tokens_taken, KeyToken(key.mark, key.mark) + ) + + # If this key starts a new block mapping, we need to add + # BLOCK-MAPPING-START. + if not self.flow_level: + if self.add_indent(key.column): + self.tokens.insert( + key.token_number - self.tokens_taken, + BlockMappingStartToken(key.mark, key.mark), + ) + + # There cannot be two simple keys one after another. + self.allow_simple_key = False + + # It must be a part of a complex key. + else: + + # Block context needs additional checks. + # (Do we really need them? They will be caught by the parser + # anyway.) + if not self.flow_level: + + # We are allowed to start a complex value if and only if + # we can start a simple key. + if not self.allow_simple_key: + raise ScannerError( + None, + None, + 'mapping values are not allowed here', + self.reader.get_mark(), + ) + + # If this value starts a new block mapping, we need to add + # BLOCK-MAPPING-START. It will be detected as an error later by + # the parser. 
+ if not self.flow_level: + if self.add_indent(self.reader.column): + mark = self.reader.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after ':' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add VALUE. + start_mark = self.reader.get_mark() + self.reader.forward() + end_mark = self.reader.get_mark() + self.tokens.append(ValueToken(start_mark, end_mark)) + + def fetch_alias(self): + # type: () -> None + # ALIAS could be a simple key. + self.save_possible_simple_key() + # No simple keys after ALIAS. + self.allow_simple_key = False + # Scan and add ALIAS. + self.tokens.append(self.scan_anchor(AliasToken)) + + def fetch_anchor(self): + # type: () -> None + # ANCHOR could start a simple key. + self.save_possible_simple_key() + # No simple keys after ANCHOR. + self.allow_simple_key = False + # Scan and add ANCHOR. + self.tokens.append(self.scan_anchor(AnchorToken)) + + def fetch_tag(self): + # type: () -> None + # TAG could start a simple key. + self.save_possible_simple_key() + # No simple keys after TAG. + self.allow_simple_key = False + # Scan and add TAG. + self.tokens.append(self.scan_tag()) + + def fetch_literal(self): + # type: () -> None + self.fetch_block_scalar(style='|') + + def fetch_folded(self): + # type: () -> None + self.fetch_block_scalar(style='>') + + def fetch_block_scalar(self, style): + # type: (Any) -> None + # A simple key may follow a block scalar. + self.allow_simple_key = True + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + # Scan and add SCALAR. 
+ self.tokens.append(self.scan_block_scalar(style)) + + def fetch_single(self): + # type: () -> None + self.fetch_flow_scalar(style="'") + + def fetch_double(self): + # type: () -> None + self.fetch_flow_scalar(style='"') + + def fetch_flow_scalar(self, style): + # type: (Any) -> None + # A flow scalar could be a simple key. + self.save_possible_simple_key() + # No simple keys after flow scalars. + self.allow_simple_key = False + # Scan and add SCALAR. + self.tokens.append(self.scan_flow_scalar(style)) + + def fetch_plain(self): + # type: () -> None + # A plain scalar could be a simple key. + self.save_possible_simple_key() + # No simple keys after plain scalars. But note that `scan_plain` will + # change this flag if the scan is finished at the beginning of the + # line. + self.allow_simple_key = False + # Scan and add SCALAR. May change `allow_simple_key`. + self.tokens.append(self.scan_plain()) + + # Checkers. + + def check_directive(self): + # type: () -> Any + # DIRECTIVE: ^ '%' ... + # The '%' indicator is already checked. + if self.reader.column == 0: + return True + return None + + def check_document_start(self): + # type: () -> Any + # DOCUMENT-START: ^ '---' (' '|'\n') + if self.reader.column == 0: + if ( + self.reader.prefix(3) == '---' + and self.reader.peek(3) in _THE_END_SPACE_TAB + ): + return True + return None + + def check_document_end(self): + # type: () -> Any + # DOCUMENT-END: ^ '...' (' '|'\n') + if self.reader.column == 0: + if ( + self.reader.prefix(3) == '...' + and self.reader.peek(3) in _THE_END_SPACE_TAB + ): + return True + return None + + def check_block_entry(self): + # type: () -> Any + # BLOCK-ENTRY: '-' (' '|'\n') + return self.reader.peek(1) in _THE_END_SPACE_TAB + + def check_key(self): + # type: () -> Any + # KEY(flow context): '?' + if bool(self.flow_level): + return True + # KEY(block context): '?' 
(' '|'\n') + return self.reader.peek(1) in _THE_END_SPACE_TAB + + def check_value(self): + # type: () -> Any + # VALUE(flow context): ':' + if self.scanner_processing_version == (1, 1): + if bool(self.flow_level): + return True + else: + if bool(self.flow_level): + if self.flow_context[-1] == '[': + if self.reader.peek(1) not in _THE_END_SPACE_TAB: + return False + elif self.tokens and isinstance(self.tokens[-1], ValueToken): + # mapping flow context scanning a value token + if self.reader.peek(1) not in _THE_END_SPACE_TAB: + return False + return True + # VALUE(block context): ':' (' '|'\n') + return self.reader.peek(1) in _THE_END_SPACE_TAB + + def check_plain(self): + # type: () -> Any + # A plain scalar may start with any non-space character except: + # '-', '?', ':', ',', '[', ']', '{', '}', + # '#', '&', '*', '!', '|', '>', '\'', '\"', + # '%', '@', '`'. + # + # It may also start with + # '-', '?', ':' + # if it is followed by a non-space character. + # + # Note that we limit the last rule to the block context (except the + # '-' character) because we want the flow context to be space + # independent. + srp = self.reader.peek + ch = srp() + if self.scanner_processing_version == (1, 1): + return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'"%@`' or ( + srp(1) not in _THE_END_SPACE_TAB + and (ch == '-' or (not self.flow_level and ch in '?:')) + ) + # YAML 1.2 + if ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'"%@`': + # ################### ^ ??? + return True + ch1 = srp(1) + if ch == '-' and ch1 not in _THE_END_SPACE_TAB: + return True + if ch == ':' and bool(self.flow_level) and ch1 not in _SPACE_TAB: + return True + + return srp(1) not in _THE_END_SPACE_TAB and ( + ch == '-' or (not self.flow_level and ch in '?:') + ) + + # Scanners. + + def scan_to_next_token(self): + # type: () -> Any + # We ignore spaces, line breaks and comments. + # If we find a line break in the block context, we set the flag + # `allow_simple_key` on. 
    def scan_directive(self):
        # type: () -> Any
        # Scan a '%YAML ...' or '%TAG ...' directive (or skip an unknown
        # one) and return the corresponding DirectiveToken.
        # See the specification for details.
        srp = self.reader.peek
        srf = self.reader.forward
        start_mark = self.reader.get_mark()
        srf()  # consume the '%' indicator
        name = self.scan_directive_name(start_mark)
        value = None
        if name == 'YAML':
            value = self.scan_yaml_directive_value(start_mark)
            end_mark = self.reader.get_mark()
        elif name == 'TAG':
            value = self.scan_tag_directive_value(start_mark)
            end_mark = self.reader.get_mark()
        else:
            # Unknown directive: its value is ignored, skip to end of line.
            end_mark = self.reader.get_mark()
            while srp() not in _THE_END:
                srf()
        # Consume trailing spaces/comment and the line break.
        self.scan_directive_ignored_line(start_mark)
        return DirectiveToken(name, value, start_mark, end_mark)
+ length = 0 + srp = self.reader.peek + ch = srp(length) + while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' or ch in '-_:.': + length += 1 + ch = srp(length) + if not length: + raise ScannerError( + 'while scanning a directive', + start_mark, + _F('expected alphabetic or numeric character, but found {ch!r}', ch=ch), + self.reader.get_mark(), + ) + value = self.reader.prefix(length) + self.reader.forward(length) + ch = srp() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError( + 'while scanning a directive', + start_mark, + _F('expected alphabetic or numeric character, but found {ch!r}', ch=ch), + self.reader.get_mark(), + ) + return value + + def scan_yaml_directive_value(self, start_mark): + # type: (Any) -> Any + # See the specification for details. + srp = self.reader.peek + srf = self.reader.forward + while srp() == ' ': + srf() + major = self.scan_yaml_directive_number(start_mark) + if srp() != '.': + raise ScannerError( + 'while scanning a directive', + start_mark, + _F("expected a digit or '.', but found {srp_call!r}", srp_call=srp()), + self.reader.get_mark(), + ) + srf() + minor = self.scan_yaml_directive_number(start_mark) + if srp() not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError( + 'while scanning a directive', + start_mark, + _F("expected a digit or '.', but found {srp_call!r}", srp_call=srp()), + self.reader.get_mark(), + ) + self.yaml_version = (major, minor) + return self.yaml_version + + def scan_yaml_directive_number(self, start_mark): + # type: (Any) -> Any + # See the specification for details. 
+ srp = self.reader.peek + srf = self.reader.forward + ch = srp() + if not ('0' <= ch <= '9'): + raise ScannerError( + 'while scanning a directive', + start_mark, + _F('expected a digit, but found {ch!r}', ch=ch), + self.reader.get_mark(), + ) + length = 0 + while '0' <= srp(length) <= '9': + length += 1 + value = int(self.reader.prefix(length)) + srf(length) + return value + + def scan_tag_directive_value(self, start_mark): + # type: (Any) -> Any + # See the specification for details. + srp = self.reader.peek + srf = self.reader.forward + while srp() == ' ': + srf() + handle = self.scan_tag_directive_handle(start_mark) + while srp() == ' ': + srf() + prefix = self.scan_tag_directive_prefix(start_mark) + return (handle, prefix) + + def scan_tag_directive_handle(self, start_mark): + # type: (Any) -> Any + # See the specification for details. + value = self.scan_tag_handle('directive', start_mark) + ch = self.reader.peek() + if ch != ' ': + raise ScannerError( + 'while scanning a directive', + start_mark, + _F("expected ' ', but found {ch!r}", ch=ch), + self.reader.get_mark(), + ) + return value + + def scan_tag_directive_prefix(self, start_mark): + # type: (Any) -> Any + # See the specification for details. + value = self.scan_tag_uri('directive', start_mark) + ch = self.reader.peek() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError( + 'while scanning a directive', + start_mark, + _F("expected ' ', but found {ch!r}", ch=ch), + self.reader.get_mark(), + ) + return value + + def scan_directive_ignored_line(self, start_mark): + # type: (Any) -> None + # See the specification for details. 
    def scan_anchor(self, TokenClass):
        # type: (Any) -> Any
        # Scan an anchor ('&name') or alias ('*name') and return a token of
        # the given TokenClass (AnchorToken or AliasToken).
        # The specification does not restrict characters for anchors and
        # aliases. This may lead to problems, for instance, the document:
        #   [ *alias, value ]
        # can be interpreted in two ways, as
        #   [ "value" ]
        # and
        #   [ *alias , "value" ]
        # Therefore we restrict aliases to numbers and ASCII letters.
        srp = self.reader.peek
        start_mark = self.reader.get_mark()
        indicator = srp()
        # '*' introduces an alias, '&' an anchor; only used in error text.
        if indicator == '*':
            name = 'alias'
        else:
            name = 'anchor'
        self.reader.forward()
        length = 0
        ch = srp(length)
        # while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
        #         or ch in '-_':
        while check_anchorname_char(ch):
            length += 1
            ch = srp(length)
        if not length:
            raise ScannerError(
                _F('while scanning an {name!s}', name=name),
                start_mark,
                _F('expected alphabetic or numeric character, but found {ch!r}', ch=ch),
                self.reader.get_mark(),
            )
        value = self.reader.prefix(length)
        self.reader.forward(length)
        # ch1 = ch
        # ch = srp()   # no need to peek, ch is already set
        # assert ch1 == ch
        # The name must be terminated by whitespace or a flow indicator.
        # NOTE(review): the error message below reuses the "alphabetic or
        # numeric" wording although this check is about the terminator.
        if ch not in '\0 \t\r\n\x85\u2028\u2029?:,[]{}%@`':
            raise ScannerError(
                _F('while scanning an {name!s}', name=name),
                start_mark,
                _F('expected alphabetic or numeric character, but found {ch!r}', ch=ch),
                self.reader.get_mark(),
            )
        end_mark = self.reader.get_mark()
        return TokenClass(value, start_mark, end_mark)
+ srp = self.reader.peek + start_mark = self.reader.get_mark() + ch = srp(1) + if ch == '<': + handle = None + self.reader.forward(2) + suffix = self.scan_tag_uri('tag', start_mark) + if srp() != '>': + raise ScannerError( + 'while parsing a tag', + start_mark, + _F("expected '>', but found {srp_call!r}", srp_call=srp()), + self.reader.get_mark(), + ) + self.reader.forward() + elif ch in _THE_END_SPACE_TAB: + handle = None + suffix = '!' + self.reader.forward() + else: + length = 1 + use_handle = False + while ch not in '\0 \r\n\x85\u2028\u2029': + if ch == '!': + use_handle = True + break + length += 1 + ch = srp(length) + handle = '!' + if use_handle: + handle = self.scan_tag_handle('tag', start_mark) + else: + handle = '!' + self.reader.forward() + suffix = self.scan_tag_uri('tag', start_mark) + ch = srp() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError( + 'while scanning a tag', + start_mark, + _F("expected ' ', but found {ch!r}", ch=ch), + self.reader.get_mark(), + ) + value = (handle, suffix) + end_mark = self.reader.get_mark() + return TagToken(value, start_mark, end_mark) + + def scan_block_scalar(self, style, rt=False): + # type: (Any, Optional[bool]) -> Any + # See the specification for details. + srp = self.reader.peek + if style == '>': + folded = True + else: + folded = False + + chunks = [] # type: List[Any] + start_mark = self.reader.get_mark() + + # Scan the header. + self.reader.forward() + chomping, increment = self.scan_block_scalar_indicators(start_mark) + # block scalar comment e.g. : |+ # comment text + block_scalar_comment = self.scan_block_scalar_ignored_line(start_mark) + + # Determine the indentation level and go to the first non-empty line. 
+ min_indent = self.indent + 1 + if increment is None: + # no increment and top level, min_indent could be 0 + if min_indent < 1 and ( + style not in '|>' + or (self.scanner_processing_version == (1, 1)) + and getattr( + self.loader, + 'top_level_block_style_scalar_no_indent_error_1_1', + False, + ) + ): + min_indent = 1 + breaks, max_indent, end_mark = self.scan_block_scalar_indentation() + indent = max(min_indent, max_indent) + else: + if min_indent < 1: + min_indent = 1 + indent = min_indent + increment - 1 + breaks, end_mark = self.scan_block_scalar_breaks(indent) + line_break = "" + + # Scan the inner part of the block scalar. + while self.reader.column == indent and srp() != '\0': + chunks.extend(breaks) + leading_non_space = srp() not in ' \t' + length = 0 + while srp(length) not in _THE_END: + length += 1 + chunks.append(self.reader.prefix(length)) + self.reader.forward(length) + line_break = self.scan_line_break() + breaks, end_mark = self.scan_block_scalar_breaks(indent) + if style in '|>' and min_indent == 0: + # at the beginning of a line, if in block style see if + # end of document/start_new_document + if self.check_document_start() or self.check_document_end(): + break + if self.reader.column == indent and srp() != '\0': + + # Unfortunately, folding rules are ambiguous. + # + # This is the folding according to the specification: + + if rt and folded and line_break == '\n': + chunks.append('\a') + if ( + folded + and line_break == '\n' + and leading_non_space + and srp() not in ' \t' + ): + if not breaks: + chunks.append(' ') + else: + chunks.append(line_break) + + # This is Clark Evans's interpretation (also in the spec + # examples): + # + # if folded and line_break == '\n': + # if not breaks: + # if srp() not in ' \t': + # chunks.append(' ') + # else: + # chunks.append(line_break) + # else: + # chunks.append(line_break) + else: + break + + # Process trailing line breaks. The 'chomping' setting determines + # whether they are included in the value. 
+ trailing = [] # type: List[Any] + if chomping in [None, True]: + chunks.append(line_break) + if chomping is True: + chunks.extend(breaks) + elif chomping in [None, False]: + trailing.extend(breaks) + + # We are done. + token = ScalarToken("".join(chunks), False, start_mark, end_mark, style) + if self.loader is not None: + comment_handler = getattr(self.loader, 'comment_handling', False) + if comment_handler is None: + if block_scalar_comment is not None: + token.add_pre_comments([block_scalar_comment]) + if len(trailing) > 0: + # Eat whitespaces and comments until we reach the next token. + if self.loader is not None: + comment_handler = getattr(self.loader, 'comment_handling', None) + if comment_handler is not None: + line = end_mark.line - len(trailing) + for x in trailing: + assert x[-1] == '\n' + self.comments.add_blank_line(x, 0, line) # type: ignore + line += 1 + comment = self.scan_to_next_token() + while comment: + trailing.append(' ' * comment[1].column + comment[0]) + comment = self.scan_to_next_token() + if self.loader is not None: + comment_handler = getattr(self.loader, 'comment_handling', False) + if comment_handler is None: + # Keep track of the trailing whitespace and following comments + # as a comment token, if isn't all included in the actual value. + comment_end_mark = self.reader.get_mark() + comment = CommentToken( + "".join(trailing), end_mark, comment_end_mark + ) + token.add_post_comment(comment) + return token + + def scan_block_scalar_indicators(self, start_mark): + # type: (Any) -> Any + # See the specification for details. 
+ srp = self.reader.peek + chomping = None + increment = None + ch = srp() + if ch in '+-': + if ch == '+': + chomping = True + else: + chomping = False + self.reader.forward() + ch = srp() + if ch in '0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError( + 'while scanning a block scalar', + start_mark, + 'expected indentation indicator in the range 1-9, ' + 'but found 0', + self.reader.get_mark(), + ) + self.reader.forward() + elif ch in '0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError( + 'while scanning a block scalar', + start_mark, + 'expected indentation indicator in the range 1-9, ' 'but found 0', + self.reader.get_mark(), + ) + self.reader.forward() + ch = srp() + if ch in '+-': + if ch == '+': + chomping = True + else: + chomping = False + self.reader.forward() + ch = srp() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError( + 'while scanning a block scalar', + start_mark, + _F( + 'expected chomping or indentation indicators, but found {ch!r}', + ch=ch, + ), + self.reader.get_mark(), + ) + return chomping, increment + + def scan_block_scalar_ignored_line(self, start_mark): + # type: (Any) -> Any + # See the specification for details. + srp = self.reader.peek + srf = self.reader.forward + prefix = '' + comment = None + while srp() == ' ': + prefix += srp() + srf() + if srp() == '#': + comment = prefix + while srp() not in _THE_END: + comment += srp() + srf() + ch = srp() + if ch not in _THE_END: + raise ScannerError( + 'while scanning a block scalar', + start_mark, + _F('expected a comment or a line break, but found {ch!r}', ch=ch), + self.reader.get_mark(), + ) + self.scan_line_break() + return comment + + def scan_block_scalar_indentation(self): + # type: () -> Any + # See the specification for details. 
+ srp = self.reader.peek + srf = self.reader.forward + chunks = [] + max_indent = 0 + end_mark = self.reader.get_mark() + while srp() in ' \r\n\x85\u2028\u2029': + if srp() != ' ': + chunks.append(self.scan_line_break()) + end_mark = self.reader.get_mark() + else: + srf() + if self.reader.column > max_indent: + max_indent = self.reader.column + return chunks, max_indent, end_mark + + def scan_block_scalar_breaks(self, indent): + # type: (int) -> Any + # See the specification for details. + chunks = [] + srp = self.reader.peek + srf = self.reader.forward + end_mark = self.reader.get_mark() + while self.reader.column < indent and srp() == ' ': + srf() + while srp() in '\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + end_mark = self.reader.get_mark() + while self.reader.column < indent and srp() == ' ': + srf() + return chunks, end_mark + + def scan_flow_scalar(self, style): + # type: (Any) -> Any + # See the specification for details. + # Note that we loose indentation rules for quoted scalars. Quoted + # scalars don't need to adhere indentation because " and ' clearly + # mark the beginning and the end of them. Therefore we are less + # restrictive then the specification requires. We only need to check + # that document separators are not included in scalars. 
+ if style == '"': + double = True + else: + double = False + srp = self.reader.peek + chunks = [] # type: List[Any] + start_mark = self.reader.get_mark() + quote = srp() + self.reader.forward() + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + while srp() != quote: + chunks.extend(self.scan_flow_scalar_spaces(double, start_mark)) + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + self.reader.forward() + end_mark = self.reader.get_mark() + return ScalarToken("".join(chunks), False, start_mark, end_mark, style) + + ESCAPE_REPLACEMENTS = { + '0': '\0', + 'a': '\x07', + 'b': '\x08', + 't': '\x09', + '\t': '\x09', + 'n': '\x0A', + 'v': '\x0B', + 'f': '\x0C', + 'r': '\x0D', + 'e': '\x1B', + ' ': '\x20', + '"': '"', + '/': '/', # as per http://www.json.org/ + '\\': '\\', + 'N': '\x85', + '_': '\xA0', + 'L': '\u2028', + 'P': '\u2029', + } + + ESCAPE_CODES = {'x': 2, 'u': 4, 'U': 8} + + def scan_flow_scalar_non_spaces(self, double, start_mark): + # type: (Any, Any) -> Any + # See the specification for details. 
+ chunks = [] # type: List[Any] + srp = self.reader.peek + srf = self.reader.forward + while True: + length = 0 + while srp(length) not in ' \n\'"\\\0\t\r\x85\u2028\u2029': + length += 1 + if length != 0: + chunks.append(self.reader.prefix(length)) + srf(length) + ch = srp() + if not double and ch == "'" and srp(1) == "'": + chunks.append("'") + srf(2) + elif (double and ch == "'") or (not double and ch in '"\\'): + chunks.append(ch) + srf() + elif double and ch == '\\': + srf() + ch = srp() + if ch in self.ESCAPE_REPLACEMENTS: + chunks.append(self.ESCAPE_REPLACEMENTS[ch]) + srf() + elif ch in self.ESCAPE_CODES: + length = self.ESCAPE_CODES[ch] + srf() + for k in range(length): + if srp(k) not in '0123456789ABCDEFabcdef': + raise ScannerError( + 'while scanning a double-quoted scalar', + start_mark, + _F( + 'expected escape sequence of {length:d} hexdecimal ' + 'numbers, but found {srp_call!r}', + length=length, + srp_call=srp(k), + ), + self.reader.get_mark(), + ) + code = int(self.reader.prefix(length), 16) + chunks.append(chr(code)) + srf(length) + elif ch in '\n\r\x85\u2028\u2029': + self.scan_line_break() + chunks.extend(self.scan_flow_scalar_breaks(double, start_mark)) + else: + raise ScannerError( + 'while scanning a double-quoted scalar', + start_mark, + _F('found unknown escape character {ch!r}', ch=ch), + self.reader.get_mark(), + ) + else: + return chunks + + def scan_flow_scalar_spaces(self, double, start_mark): + # type: (Any, Any) -> Any + # See the specification for details. 
+ srp = self.reader.peek + chunks = [] + length = 0 + while srp(length) in ' \t': + length += 1 + whitespaces = self.reader.prefix(length) + self.reader.forward(length) + ch = srp() + if ch == '\0': + raise ScannerError( + 'while scanning a quoted scalar', + start_mark, + 'found unexpected end of stream', + self.reader.get_mark(), + ) + elif ch in '\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + breaks = self.scan_flow_scalar_breaks(double, start_mark) + if line_break != '\n': + chunks.append(line_break) + elif not breaks: + chunks.append(' ') + chunks.extend(breaks) + else: + chunks.append(whitespaces) + return chunks + + def scan_flow_scalar_breaks(self, double, start_mark): + # type: (Any, Any) -> Any + # See the specification for details. + chunks = [] # type: List[Any] + srp = self.reader.peek + srf = self.reader.forward + while True: + # Instead of checking indentation, we check for document + # separators. + prefix = self.reader.prefix(3) + if (prefix == '---' or prefix == '...') and srp(3) in _THE_END_SPACE_TAB: + raise ScannerError( + 'while scanning a quoted scalar', + start_mark, + 'found unexpected document separator', + self.reader.get_mark(), + ) + while srp() in ' \t': + srf() + if srp() in '\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + else: + return chunks + + def scan_plain(self): + # type: () -> Any + # See the specification for details. + # We add an additional restriction for the flow context: + # plain scalars in the flow context cannot contain ',', ': ' and '?'. + # We also keep track of the `allow_simple_key` flag here. + # Indentation rules are loosed for the flow context. + srp = self.reader.peek + srf = self.reader.forward + chunks = [] # type: List[Any] + start_mark = self.reader.get_mark() + end_mark = start_mark + indent = self.indent + 1 + # We allow zero indentation for scalars, but then we need to check for + # document separators at the beginning of the line. 
+ # if indent == 0: + # indent = 1 + spaces = [] # type: List[Any] + while True: + length = 0 + if srp() == '#': + break + while True: + ch = srp(length) + if ch == ':' and srp(length + 1) not in _THE_END_SPACE_TAB: + pass + elif ch == '?' and self.scanner_processing_version != (1, 1): + pass + elif ( + ch in _THE_END_SPACE_TAB + or ( + not self.flow_level + and ch == ':' + and srp(length + 1) in _THE_END_SPACE_TAB + ) + or (self.flow_level and ch in ',:?[]{}') + ): + break + length += 1 + # It's not clear what we should do with ':' in the flow context. + if ( + self.flow_level + and ch == ':' + and srp(length + 1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}' + ): + srf(length) + raise ScannerError( + 'while scanning a plain scalar', + start_mark, + "found unexpected ':'", + self.reader.get_mark(), + 'Please check ' + 'http://pyyaml.org/wiki/YAMLColonInFlowContext ' + 'for details.', + ) + if length == 0: + break + self.allow_simple_key = False + chunks.extend(spaces) + chunks.append(self.reader.prefix(length)) + srf(length) + end_mark = self.reader.get_mark() + spaces = self.scan_plain_spaces(indent, start_mark) + if ( + not spaces + or srp() == '#' + or (not self.flow_level and self.reader.column < indent) + ): + break + + token = ScalarToken("".join(chunks), True, start_mark, end_mark) + # getattr provides True so C type loader, which cannot handle comment, + # will not make CommentToken + if self.loader is not None: + comment_handler = getattr(self.loader, 'comment_handling', False) + if comment_handler is None: + if spaces and spaces[0] == '\n': + # Create a comment token to preserve the trailing line breaks. 
    def scan_plain_spaces(self, indent, start_mark):
        # type: (Any, Any) -> Any
        # Consume spaces and line breaks between chunks of a plain scalar;
        # return the whitespace chunks to (possibly) join into the value,
        # or None when a document separator is encountered.
        # See the specification for details.
        # The specification is really confusing about tabs in plain scalars.
        # We just forbid them completely. Do not use tabs in YAML!
        srp = self.reader.peek
        srf = self.reader.forward
        chunks = []
        length = 0
        while srp(length) in ' ':
            length += 1
        whitespaces = self.reader.prefix(length)
        self.reader.forward(length)
        ch = srp()
        if ch in '\r\n\x85\u2028\u2029':
            line_break = self.scan_line_break()
            # a line break after a plain scalar chunk re-enables simple keys
            self.allow_simple_key = True
            prefix = self.reader.prefix(3)
            # bare `return` (None) signals the caller that the scalar ends
            # at a document separator
            if (prefix == '---' or prefix == '...') and srp(3) in _THE_END_SPACE_TAB:
                return
            breaks = []
            while srp() in ' \r\n\x85\u2028\u2029':
                if srp() == ' ':
                    srf()
                else:
                    breaks.append(self.scan_line_break())
                    prefix = self.reader.prefix(3)
                    if (prefix == '---' or prefix == '...') and srp(
                        3
                    ) in _THE_END_SPACE_TAB:
                        return
            # folding: a single '\n' becomes a space unless followed by
            # further breaks; other break types are kept literally
            if line_break != '\n':
                chunks.append(line_break)
            elif not breaks:
                chunks.append(' ')
            chunks.extend(breaks)
        elif whitespaces:
            chunks.append(whitespaces)
        return chunks
+ srp = self.reader.peek + ch = srp() + if ch != '!': + raise ScannerError( + _F('while scanning an {name!s}', name=name), + start_mark, + _F("expected '!', but found {ch!r}", ch=ch), + self.reader.get_mark(), + ) + length = 1 + ch = srp(length) + if ch != ' ': + while ( + '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' or ch in '-_' + ): + length += 1 + ch = srp(length) + if ch != '!': + self.reader.forward(length) + raise ScannerError( + _F('while scanning an {name!s}', name=name), + start_mark, + _F("expected '!', but found {ch!r}", ch=ch), + self.reader.get_mark(), + ) + length += 1 + value = self.reader.prefix(length) + self.reader.forward(length) + return value + + def scan_tag_uri(self, name, start_mark): + # type: (Any, Any) -> Any + # See the specification for details. + # Note: we do not check if URI is well-formed. + srp = self.reader.peek + chunks = [] + length = 0 + ch = srp(length) + while ( + '0' <= ch <= '9' + or 'A' <= ch <= 'Z' + or 'a' <= ch <= 'z' + or ch in "-;/?:@&=+$,_.!~*'()[]%" + or ((self.scanner_processing_version > (1, 1)) and ch == '#') + ): + if ch == '%': + chunks.append(self.reader.prefix(length)) + self.reader.forward(length) + length = 0 + chunks.append(self.scan_uri_escapes(name, start_mark)) + else: + length += 1 + ch = srp(length) + if length != 0: + chunks.append(self.reader.prefix(length)) + self.reader.forward(length) + length = 0 + if not chunks: + raise ScannerError( + _F('while parsing an {name!s}', name=name), + start_mark, + _F('expected URI, but found {ch!r}', ch=ch), + self.reader.get_mark(), + ) + return "".join(chunks) + + def scan_uri_escapes(self, name, start_mark): + # type: (Any, Any) -> Any + # See the specification for details. 
+ srp = self.reader.peek + srf = self.reader.forward + code_bytes = [] # type: List[Any] + mark = self.reader.get_mark() + while srp() == '%': + srf() + for k in range(2): + if srp(k) not in '0123456789ABCDEFabcdef': + raise ScannerError( + _F('while scanning an {name!s}', name=name), + start_mark, + _F( + 'expected URI escape sequence of 2 hexdecimal numbers,' + ' but found {srp_call!r}', + srp_call=srp(k), + ), + self.reader.get_mark(), + ) + code_bytes.append(int(self.reader.prefix(2), 16)) + srf(2) + try: + value = bytes(code_bytes).decode('utf-8') + except UnicodeDecodeError as exc: + raise ScannerError( + _F('while scanning an {name!s}', name=name), start_mark, str(exc), mark + ) + return value + + def scan_line_break(self): + # type: () -> Any + # Transforms: + # '\r\n' : '\n' + # '\r' : '\n' + # '\n' : '\n' + # '\x85' : '\n' + # '\u2028' : '\u2028' + # '\u2029 : '\u2029' + # default : '' + ch = self.reader.peek() + if ch in '\r\n\x85': + if self.reader.prefix(2) == '\r\n': + self.reader.forward(2) + else: + self.reader.forward() + return '\n' + elif ch in '\u2028\u2029': + self.reader.forward() + return ch + return "" + + +class RoundTripScanner(Scanner): + def check_token(self, *choices): + # type: (Any) -> bool + # Check if the next token is one of the given types. + while self.need_more_tokens(): + self.fetch_more_tokens() + self._gather_comments() + if len(self.tokens) > 0: + if not choices: + return True + for choice in choices: + if isinstance(self.tokens[0], choice): + return True + return False + + def peek_token(self): + # type: () -> Any + # Return the next token, but do not delete if from the queue. 
    def _gather_comments(self):
        # type: () -> Any
        """Combine consecutive comment tokens and attach them as pre-comments
        to the next non-comment token."""
        comments = []  # type: List[Any]
        if not self.tokens:
            return comments
        # pick up a comment already sitting at the head of the queue
        if isinstance(self.tokens[0], CommentToken):
            comment = self.tokens.pop(0)
            self.tokens_taken += 1
            comments.append(comment)
        # keep fetching while the head of the queue stays a comment
        while self.need_more_tokens():
            self.fetch_more_tokens()
            if not self.tokens:
                return comments
            if isinstance(self.tokens[0], CommentToken):
                self.tokens_taken += 1
                comment = self.tokens.pop(0)
                # nprint('dropping2', comment)
                comments.append(comment)
        if len(comments) >= 1:
            # the first real token owns everything gathered so far
            self.tokens[0].add_pre_comments(comments)
        # pull in post comment on e.g. ':'
        if not self.done and len(self.tokens) < 2:
            self.fetch_more_tokens()
FlowXEndToken, otherwise + # hidden streamtokens could get them (leave them and they will be + # pre comments for the next map/seq + if ( + len(self.tokens) > 1 + and isinstance( + self.tokens[0], + ( + ScalarToken, + ValueToken, + FlowSequenceEndToken, + FlowMappingEndToken, + ), + ) + and isinstance(self.tokens[1], CommentToken) + and self.tokens[0].end_mark.line == self.tokens[1].start_mark.line + ): + self.tokens_taken += 1 + c = self.tokens.pop(1) + self.fetch_more_tokens() + while len(self.tokens) > 1 and isinstance(self.tokens[1], CommentToken): + self.tokens_taken += 1 + c1 = self.tokens.pop(1) + c.value = c.value + (' ' * c1.start_mark.column) + c1.value + self.fetch_more_tokens() + self.tokens[0].add_post_comment(c) + elif ( + len(self.tokens) > 1 + and isinstance(self.tokens[0], ScalarToken) + and isinstance(self.tokens[1], CommentToken) + and self.tokens[0].end_mark.line != self.tokens[1].start_mark.line + ): + self.tokens_taken += 1 + c = self.tokens.pop(1) + c.value = ( + '\n' * (c.start_mark.line - self.tokens[0].end_mark.line) + + (' ' * c.start_mark.column) + + c.value + ) + self.tokens[0].add_post_comment(c) + self.fetch_more_tokens() + while len(self.tokens) > 1 and isinstance(self.tokens[1], CommentToken): + self.tokens_taken += 1 + c1 = self.tokens.pop(1) + c.value = c.value + (' ' * c1.start_mark.column) + c1.value + self.fetch_more_tokens() + self.tokens_taken += 1 + return self.tokens.pop(0) + return None + + def fetch_comment(self, comment): + # type: (Any) -> None + value, start_mark, end_mark = comment + while value and value[-1] == ' ': + # empty line within indented key context + # no need to update end-mark, that is not used + value = value[:-1] + self.tokens.append(CommentToken(value, start_mark, end_mark)) + + # scanner + + def scan_to_next_token(self): + # type: () -> Any + # We ignore spaces, line breaks and comments. + # If we find a line break in the block context, we set the flag + # `allow_simple_key` on. 
+ # The byte order mark is stripped if it's the first character in the + # stream. We do not yet support BOM inside the stream as the + # specification requires. Any such mark will be considered as a part + # of the document. + # + # TODO: We need to make tab handling rules more sane. A good rule is + # Tabs cannot precede tokens + # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END, + # KEY(block), VALUE(block), BLOCK-ENTRY + # So the checking code is + # if : + # self.allow_simple_keys = False + # We also need to add the check for `allow_simple_keys == True` to + # `unwind_indent` before issuing BLOCK-END. + # Scanners for block, flow, and plain scalars need to be modified. + + srp = self.reader.peek + srf = self.reader.forward + if self.reader.index == 0 and srp() == '\uFEFF': + srf() + found = False + while not found: + while srp() == ' ': + srf() + ch = srp() + if ch == '#': + start_mark = self.reader.get_mark() + comment = ch + srf() + while ch not in _THE_END: + ch = srp() + if ch == '\0': # don't gobble the end-of-stream character + # but add an explicit newline as "YAML processors should terminate + # the stream with an explicit line break + # https://yaml.org/spec/1.2/spec.html#id2780069 + comment += '\n' + break + comment += ch + srf() + # gather any blank lines following the comment too + ch = self.scan_line_break() + while len(ch) > 0: + comment += ch + ch = self.scan_line_break() + end_mark = self.reader.get_mark() + if not self.flow_level: + self.allow_simple_key = True + return comment, start_mark, end_mark + if self.scan_line_break() != '': + start_mark = self.reader.get_mark() + if not self.flow_level: + self.allow_simple_key = True + ch = srp() + if ch == '\n': # empty toplevel lines + start_mark = self.reader.get_mark() + comment = "" + while ch: + ch = self.scan_line_break(empty_line=True) + comment += ch + if srp() == '#': + # empty line followed by indented real comment + comment = comment.rsplit('\n', 1)[0] + '\n' + end_mark = 
self.reader.get_mark() + return comment, start_mark, end_mark + else: + found = True + return None + + def scan_line_break(self, empty_line=False): + # type: (bool) -> Text + # Transforms: + # '\r\n' : '\n' + # '\r' : '\n' + # '\n' : '\n' + # '\x85' : '\n' + # '\u2028' : '\u2028' + # '\u2029 : '\u2029' + # default : '' + ch = self.reader.peek() # type: Text + if ch in '\r\n\x85': + if self.reader.prefix(2) == '\r\n': + self.reader.forward(2) + else: + self.reader.forward() + return '\n' + elif ch in '\u2028\u2029': + self.reader.forward() + return ch + elif empty_line and ch in '\t ': + self.reader.forward() + return ch + return "" + + def scan_block_scalar(self, style, rt=True): + # type: (Any, Optional[bool]) -> Any + return Scanner.scan_block_scalar(self, style, rt=rt) + + +# commenthandling 2021, differentiatiation not needed + +VALUECMNT = 0 +KEYCMNT = 0 # 1 +# TAGCMNT = 2 +# ANCHORCMNT = 3 + + +class CommentBase: + __slots__ = ( + 'value', + 'line', + 'column', + 'used', + 'function', + 'fline', + 'ufun', + 'uline', + ) + + def __init__(self, value, line, column): + # type: (Any, Any, Any) -> None + self.value = value + self.line = line + self.column = column + self.used = ' ' + info = inspect.getframeinfo(inspect.stack()[3][0]) + self.function = info.function + self.fline = info.lineno + self.ufun = None + self.uline = None + + def set_used(self, v='+'): + # type: (Any) -> None + self.used = v + info = inspect.getframeinfo(inspect.stack()[1][0]) + self.ufun = info.function # type: ignore + self.uline = info.lineno # type: ignore + + def set_assigned(self): + # type: () -> None + self.used = '|' + + def __str__(self): + # type: () -> str + return _F('{value}', value=self.value) # type: ignore + + def __repr__(self): + # type: () -> str + return _F('{value!r}', value=self.value) # type: ignore + + def info(self): + # type: () -> str + return _F( # type: ignore + '{name}{used} {line:2}:{column:<2} "{value:40s} {function}:{fline} {ufun}:{uline}', + 
class EOLComment(CommentBase):
    # comment that follows a token on the same line
    name = 'EOLC'


class FullLineComment(CommentBase):
    # comment occupying a complete line of its own
    name = 'FULL'


class BlankLineComment(CommentBase):
    # an empty line, tracked so round-tripping can preserve it
    name = 'BLNK'
__getitem__(self, idx): + # type: (Any) -> Any + return self.comments[idx] + + def __str__(self): + # type: () -> Any + return ( + 'ParsedComments:\n ' + + '\n '.join( + ( + _F('{lineno:2} {x}', lineno=lineno, x=x.info()) + for lineno, x in self.comments.items() + ) + ) + + '\n' + ) + + def last(self): + # type: () -> str + lineno, x = list(self.comments.items())[-1] + return _F('{lineno:2} {x}\n', lineno=lineno, x=x.info()) # type: ignore + + def any_unprocessed(self): + # type: () -> bool + # ToDo: might want to differentiate based on lineno + return len(self.unused) > 0 + # for lno, comment in reversed(self.comments.items()): + # if comment.used == ' ': + # return True + # return False + + def unprocessed(self, use=False): + # type: (Any) -> Any + while len(self.unused) > 0: + first = self.unused.pop(0) if use else self.unused[0] + info = inspect.getframeinfo(inspect.stack()[1][0]) + xprintf( + 'using', first, self.comments[first].value, info.function, info.lineno + ) + yield first, self.comments[first] + if use: + self.comments[first].set_used() + + def assign_pre(self, token): + # type: (Any) -> Any + token_line = token.start_mark.line + info = inspect.getframeinfo(inspect.stack()[1][0]) + xprintf('assign_pre', token_line, self.unused, info.function, info.lineno) + gobbled = False + while self.unused and self.unused[0] < token_line: + gobbled = True + first = self.unused.pop(0) + xprintf('assign_pre < ', first) + self.comments[first].set_used() + token.add_comment_pre(first) + return gobbled + + def assign_eol(self, tokens): + # type: (Any) -> Any + try: + comment_line = self.unused[0] + except IndexError: + return + if not isinstance(self.comments[comment_line], EOLComment): + return + idx = 1 + while tokens[-idx].start_mark.line > comment_line or isinstance( + tokens[-idx], ValueToken + ): + idx += 1 + xprintf('idx1', idx) + if ( + len(tokens) > idx + and isinstance(tokens[-idx], ScalarToken) + and isinstance(tokens[-(idx + 1)], ScalarToken) + ): + return + 
try: + if isinstance(tokens[-idx], ScalarToken) and isinstance( + tokens[-(idx + 1)], KeyToken + ): + try: + eol_idx = self.unused.pop(0) + self.comments[eol_idx].set_used() + xprintf('>>>>>a', idx, eol_idx, KEYCMNT) + tokens[-idx].add_comment_eol(eol_idx, KEYCMNT) + except IndexError: + raise NotImplementedError + return + except IndexError: + xprintf('IndexError1') + pass + try: + if isinstance(tokens[-idx], ScalarToken) and isinstance( + tokens[-(idx + 1)], (ValueToken, BlockEntryToken) + ): + try: + eol_idx = self.unused.pop(0) + self.comments[eol_idx].set_used() + tokens[-idx].add_comment_eol(eol_idx, VALUECMNT) + except IndexError: + raise NotImplementedError + return + except IndexError: + xprintf('IndexError2') + pass + for t in tokens: + xprintf('tt-', t) + xprintf('not implemented EOL', type(tokens[-idx])) + import sys + + sys.exit(0) + + def assign_post(self, token): + # type: (Any) -> Any + token_line = token.start_mark.line + info = inspect.getframeinfo(inspect.stack()[1][0]) + xprintf('assign_post', token_line, self.unused, info.function, info.lineno) + gobbled = False + while self.unused and self.unused[0] < token_line: + gobbled = True + first = self.unused.pop(0) + xprintf('assign_post < ', first) + self.comments[first].set_used() + token.add_comment_post(first) + return gobbled + + def str_unprocessed(self): + # type: () -> Any + return ''.join( + ( + _F(' {ind:2} {x}\n', ind=ind, x=x.info()) + for ind, x in self.comments.items() + if x.used == ' ' + ) + ) + + +class RoundTripScannerSC(Scanner): # RoundTripScanner Split Comments + def __init__(self, *arg, **kw): + # type: (Any, Any) -> None + super().__init__(*arg, **kw) + assert self.loader is not None + # comments isinitialised on .need_more_tokens and persist on + # self.loader.parsed_comments + self.comments = None + + def get_token(self): + # type: () -> Any + # Return the next token. 
+ while self.need_more_tokens(): + self.fetch_more_tokens() + if len(self.tokens) > 0: + if isinstance(self.tokens[0], BlockEndToken): + self.comments.assign_post(self.tokens[0]) # type: ignore + else: + self.comments.assign_pre(self.tokens[0]) # type: ignore + self.tokens_taken += 1 + return self.tokens.pop(0) + + def need_more_tokens(self): + # type: () -> bool + if self.comments is None: + self.loader.parsed_comments = self.comments = ScannedComments() # type: ignore + if self.done: + return False + if len(self.tokens) == 0: + return True + # The current token may be a potential simple key, so we + # need to look further. + self.stale_possible_simple_keys() + if self.next_possible_simple_key() == self.tokens_taken: + return True + if len(self.tokens) < 2: + return True + if self.tokens[0].start_mark.line == self.tokens[-1].start_mark.line: + return True + if True: + xprintf('-x--', len(self.tokens)) + for t in self.tokens: + xprintf(t) + # xprintf(self.comments.last()) + xprintf(self.comments.str_unprocessed()) # type: ignore + self.comments.assign_pre(self.tokens[0]) # type: ignore + self.comments.assign_eol(self.tokens) # type: ignore + return False + + def scan_to_next_token(self): + # type: () -> None + srp = self.reader.peek + srf = self.reader.forward + if self.reader.index == 0 and srp() == '\uFEFF': + srf() + start_mark = self.reader.get_mark() + # xprintf('current_mark', start_mark.line, start_mark.column) + found = False + while not found: + while srp() == ' ': + srf() + ch = srp() + if ch == '#': + comment_start_mark = self.reader.get_mark() + comment = ch + srf() # skipt the '#' + while ch not in _THE_END: + ch = srp() + if ch == '\0': # don't gobble the end-of-stream character + # but add an explicit newline as "YAML processors should terminate + # the stream with an explicit line break + # https://yaml.org/spec/1.2/spec.html#id2780069 + comment += '\n' + break + comment += ch + srf() + # we have a comment + if start_mark.column == 0: + 
self.comments.add_full_line_comment( # type: ignore + comment, comment_start_mark.column, comment_start_mark.line + ) + else: + self.comments.add_eol_comment( # type: ignore + comment, comment_start_mark.column, comment_start_mark.line + ) + comment = "" + # gather any blank lines or full line comments following the comment as well + self.scan_empty_or_full_line_comments() + if not self.flow_level: + self.allow_simple_key = True + return + if bool(self.scan_line_break()): + # start_mark = self.reader.get_mark() + if not self.flow_level: + self.allow_simple_key = True + self.scan_empty_or_full_line_comments() + return None + ch = srp() + if ch == '\n': # empty toplevel lines + start_mark = self.reader.get_mark() + comment = "" + while ch: + ch = self.scan_line_break(empty_line=True) + comment += ch + if srp() == '#': + # empty line followed by indented real comment + comment = comment.rsplit('\n', 1)[0] + '\n' + _ = self.reader.get_mark() # gobble end_mark + return None + else: + found = True + return None + + def scan_empty_or_full_line_comments(self): + # type: () -> None + blmark = self.reader.get_mark() + assert blmark.column == 0 + blanks = "" + comment = None + mark = None + ch = self.reader.peek() + while True: + # nprint('ch', repr(ch), self.reader.get_mark().column) + if ch in '\r\n\x85\u2028\u2029': + if self.reader.prefix(2) == '\r\n': + self.reader.forward(2) + else: + self.reader.forward() + if comment is not None: + comment += '\n' + self.comments.add_full_line_comment(comment, mark.column, mark.line) + comment = None + else: + blanks += '\n' + self.comments.add_blank_line(blanks, blmark.column, blmark.line) # type: ignore # NOQA + blanks = "" + blmark = self.reader.get_mark() + ch = self.reader.peek() + continue + if comment is None: + if ch in ' \t': + blanks += ch + elif ch == '#': + mark = self.reader.get_mark() + comment = '#' + else: + # print('breaking on', repr(ch)) + break + else: + comment += ch + self.reader.forward() + ch = 
self.reader.peek() + + def scan_block_scalar_ignored_line(self, start_mark): + # type: (Any) -> Any + # See the specification for details. + srp = self.reader.peek + srf = self.reader.forward + prefix = '' + comment = None + while srp() == ' ': + prefix += srp() + srf() + if srp() == '#': + comment = '' + mark = self.reader.get_mark() + while srp() not in _THE_END: + comment += srp() + srf() + comment += '\n' # type: ignore + ch = srp() + if ch not in _THE_END: + raise ScannerError( + 'while scanning a block scalar', + start_mark, + _F('expected a comment or a line break, but found {ch!r}', ch=ch), + self.reader.get_mark(), + ) + if comment is not None: + self.comments.add_eol_comment(comment, mark.column, mark.line) # type: ignore + self.scan_line_break() + return None diff --git a/lib/ruyaml/serializer.py b/lib/ruyaml/serializer.py new file mode 100644 index 0000000..c92649d --- /dev/null +++ b/lib/ruyaml/serializer.py @@ -0,0 +1,251 @@ +# coding: utf-8 + +from ruyaml.compat import DBG_NODE, dbg, nprint, nprintf # NOQA +from ruyaml.error import YAMLError +from ruyaml.events import ( + AliasEvent, + DocumentEndEvent, + DocumentStartEvent, + MappingEndEvent, + MappingStartEvent, + ScalarEvent, + SequenceEndEvent, + SequenceStartEvent, + StreamEndEvent, + StreamStartEvent, +) +from ruyaml.nodes import MappingNode, ScalarNode, SequenceNode +from ruyaml.util import RegExp + +if False: # MYPY + from typing import Any, Dict, Optional, Text, Union # NOQA + + from ruyaml.compat import VersionType # NOQA + +__all__ = ['Serializer', 'SerializerError'] + + +class SerializerError(YAMLError): + pass + + +class Serializer: + + # 'id' and 3+ numbers, but not 000 + ANCHOR_TEMPLATE = 'id%03d' + ANCHOR_RE = RegExp('id(?!000$)\\d{3,}') + + def __init__( + self, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + dumper=None, + ): + # type: (Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any) -> None # NOQA + self.dumper = 
dumper + if self.dumper is not None: + self.dumper._serializer = self + self.use_encoding = encoding + self.use_explicit_start = explicit_start + self.use_explicit_end = explicit_end + if isinstance(version, str): + self.use_version = tuple(map(int, version.split('.'))) + else: + self.use_version = version # type: ignore + self.use_tags = tags + self.serialized_nodes = {} # type: Dict[Any, Any] + self.anchors = {} # type: Dict[Any, Any] + self.last_anchor_id = 0 + self.closed = None # type: Optional[bool] + self._templated_id = None + + @property + def emitter(self): + # type: () -> Any + if hasattr(self.dumper, 'typ'): + return self.dumper.emitter # type: ignore + return self.dumper._emitter # type: ignore + + @property + def resolver(self): + # type: () -> Any + if hasattr(self.dumper, 'typ'): + self.dumper.resolver # type: ignore + return self.dumper._resolver # type: ignore + + def open(self): + # type: () -> None + if self.closed is None: + self.emitter.emit(StreamStartEvent(encoding=self.use_encoding)) + self.closed = False + elif self.closed: + raise SerializerError('serializer is closed') + else: + raise SerializerError('serializer is already opened') + + def close(self): + # type: () -> None + if self.closed is None: + raise SerializerError('serializer is not opened') + elif not self.closed: + self.emitter.emit(StreamEndEvent()) + self.closed = True + + # def __del__(self): + # self.close() + + def serialize(self, node): + # type: (Any) -> None + if dbg(DBG_NODE): + nprint('Serializing nodes') + node.dump() + if self.closed is None: + raise SerializerError('serializer is not opened') + elif self.closed: + raise SerializerError('serializer is closed') + self.emitter.emit( + DocumentStartEvent( + explicit=self.use_explicit_start, + version=self.use_version, + tags=self.use_tags, + ) + ) + self.anchor_node(node) + self.serialize_node(node, None, None) + self.emitter.emit(DocumentEndEvent(explicit=self.use_explicit_end)) + self.serialized_nodes = {} + 
self.anchors = {} + self.last_anchor_id = 0 + + def anchor_node(self, node): + # type: (Any) -> None + if node in self.anchors: + if self.anchors[node] is None: + self.anchors[node] = self.generate_anchor(node) + else: + anchor = None + try: + if node.anchor.always_dump: + anchor = node.anchor.value + except: # NOQA + pass + self.anchors[node] = anchor + if isinstance(node, SequenceNode): + for item in node.value: + self.anchor_node(item) + elif isinstance(node, MappingNode): + for key, value in node.value: + self.anchor_node(key) + self.anchor_node(value) + + def generate_anchor(self, node): + # type: (Any) -> Any + try: + anchor = node.anchor.value + except: # NOQA + anchor = None + if anchor is None: + self.last_anchor_id += 1 + return self.ANCHOR_TEMPLATE % self.last_anchor_id + return anchor + + def serialize_node(self, node, parent, index): + # type: (Any, Any, Any) -> None + alias = self.anchors[node] + if node in self.serialized_nodes: + node_style = getattr(node, 'style', None) + if node_style != '?': + node_style = None + self.emitter.emit(AliasEvent(alias, style=node_style)) + else: + self.serialized_nodes[node] = True + self.resolver.descend_resolver(parent, index) + if isinstance(node, ScalarNode): + # here check if the node.tag equals the one that would result from parsing + # if not equal quoting is necessary for strings + detected_tag = self.resolver.resolve( + ScalarNode, node.value, (True, False) + ) + default_tag = self.resolver.resolve( + ScalarNode, node.value, (False, True) + ) + implicit = ( + (node.tag == detected_tag), + (node.tag == default_tag), + node.tag.startswith('tag:yaml.org,2002:'), + ) + self.emitter.emit( + ScalarEvent( + alias, + node.tag, + implicit, + node.value, + style=node.style, + comment=node.comment, + ) + ) + elif isinstance(node, SequenceNode): + implicit = node.tag == self.resolver.resolve( + SequenceNode, node.value, True + ) + comment = node.comment + end_comment = None + seq_comment = None + if node.flow_style is 
True: + if comment: # eol comment on flow style sequence + seq_comment = comment[0] + # comment[0] = None + if comment and len(comment) > 2: + end_comment = comment[2] + else: + end_comment = None + self.emitter.emit( + SequenceStartEvent( + alias, + node.tag, + implicit, + flow_style=node.flow_style, + comment=node.comment, + ) + ) + index = 0 + for item in node.value: + self.serialize_node(item, node, index) + index += 1 + self.emitter.emit(SequenceEndEvent(comment=[seq_comment, end_comment])) + elif isinstance(node, MappingNode): + implicit = node.tag == self.resolver.resolve( + MappingNode, node.value, True + ) + comment = node.comment + end_comment = None + map_comment = None + if node.flow_style is True: + if comment: # eol comment on flow style sequence + map_comment = comment[0] + # comment[0] = None + if comment and len(comment) > 2: + end_comment = comment[2] + self.emitter.emit( + MappingStartEvent( + alias, + node.tag, + implicit, + flow_style=node.flow_style, + comment=node.comment, + nr_items=len(node.value), + ) + ) + for key, value in node.value: + self.serialize_node(key, node, None) + self.serialize_node(value, node, key) + self.emitter.emit(MappingEndEvent(comment=[map_comment, end_comment])) + self.resolver.ascend_resolver() + + +def templated_id(s): + # type: (Text) -> Any + return Serializer.ANCHOR_RE.match(s) diff --git a/lib/ruyaml/timestamp.py b/lib/ruyaml/timestamp.py new file mode 100644 index 0000000..6153366 --- /dev/null +++ b/lib/ruyaml/timestamp.py @@ -0,0 +1,65 @@ +# coding: utf-8 + +import copy +import datetime + +# ToDo: you could probably attach the tzinfo correctly to the object +# a more complete datetime might be used by safe loading as well + +if False: # MYPY + from typing import Any, Dict, List, Optional # NOQA + + +class TimeStamp(datetime.datetime): + def __init__(self, *args, **kw): + # type: (Any, Any) -> None + self._yaml = dict(t=False, tz=None, delta=0) # type: Dict[Any, Any] + + def __new__(cls, *args, **kw): # 
datetime is immutable + # type: (Any, Any) -> Any + return datetime.datetime.__new__(cls, *args, **kw) + + def __deepcopy__(self, memo): + # type: (Any) -> Any + ts = TimeStamp( + self.year, self.month, self.day, self.hour, self.minute, self.second + ) + ts._yaml = copy.deepcopy(self._yaml) + return ts + + def replace( + self, + year=None, + month=None, + day=None, + hour=None, + minute=None, + second=None, + microsecond=None, + tzinfo=True, + fold=None, + ): + # type: (Any, Any, Any, Any, Any, Any, Any, Any, Any) -> Any + if year is None: + year = self.year + if month is None: + month = self.month + if day is None: + day = self.day + if hour is None: + hour = self.hour + if minute is None: + minute = self.minute + if second is None: + second = self.second + if microsecond is None: + microsecond = self.microsecond + if tzinfo is True: + tzinfo = self.tzinfo + if fold is None: + fold = self.fold + ts = type(self)( + year, month, day, hour, minute, second, microsecond, tzinfo, fold=fold + ) + ts._yaml = copy.deepcopy(self._yaml) + return ts diff --git a/lib/ruyaml/tokens.py b/lib/ruyaml/tokens.py new file mode 100644 index 0000000..d697b70 --- /dev/null +++ b/lib/ruyaml/tokens.py @@ -0,0 +1,413 @@ +# coding: utf-8 + +from typing import Any + +from ruyaml.compat import _F +from ruyaml.error import StreamMark + +SHOW_LINES = True + + +class Token: + __slots__ = 'start_mark', 'end_mark', '_comment' + + def __init__( + self, + start_mark: StreamMark, + end_mark: StreamMark, + ) -> None: + self.start_mark = start_mark + self.end_mark = end_mark + + def __repr__(self): + # type: () -> Any + # attributes = [key for key in self.__slots__ if not key.endswith('_mark') and + # hasattr('self', key)] + attributes = [key for key in self.__slots__ if not key.endswith('_mark')] + attributes.sort() + # arguments = ', '.join( + # [_F('{key!s}={gattr!r})', key=key, gattr=getattr(self, key)) for key in attributes] + # ) + arguments = [ + _F('{key!s}={gattr!r}', key=key, 
gattr=getattr(self, key)) + for key in attributes + ] + if SHOW_LINES: + try: + arguments.append('line: ' + str(self.start_mark.line)) + except: # NOQA + pass + try: + arguments.append('comment: ' + str(self._comment)) + except: # NOQA + pass + return '{}({})'.format(self.__class__.__name__, ', '.join(arguments)) + + @property + def column(self): + # type: () -> int + return self.start_mark.column + + @column.setter + def column(self, pos): + # type: (Any) -> None + self.start_mark.column = pos + + # old style ( <= 0.17) is a TWO element list with first being the EOL + # comment concatenated with following FLC/BLNK; and second being a list of FLC/BLNK + # preceding the token + # new style ( >= 0.17 ) is a THREE element list with the first being a list of + # preceding FLC/BLNK, the second EOL and the third following FLC/BLNK + # note that new style has differing order, and does not consist of CommentToken(s) + # but of CommentInfo instances + # any non-assigned values in new style are None, but first and last can be empty list + # new style routines add one comment at a time + + # going to be deprecated in favour of add_comment_eol/post + def add_post_comment(self, comment): + # type: (Any) -> None + if not hasattr(self, '_comment'): + self._comment = [None, None] + else: + assert len(self._comment) in [2, 5] # make sure it is version 0 + # if isinstance(comment, CommentToken): + # if comment.value.startswith('# C09'): + # raise + self._comment[0] = comment + + # going to be deprecated in favour of add_comment_pre + def add_pre_comments(self, comments): + # type: (Any) -> None + if not hasattr(self, '_comment'): + self._comment = [None, None] + else: + assert len(self._comment) == 2 # make sure it is version 0 + assert self._comment[1] is None + self._comment[1] = comments + return + + # new style + def add_comment_pre(self, comment): + # type: (Any) -> None + if not hasattr(self, '_comment'): + self._comment = [[], None, None] # type: ignore + else: + assert 
len(self._comment) == 3 + if self._comment[0] is None: + self._comment[0] = [] # type: ignore + self._comment[0].append(comment) # type: ignore + + def add_comment_eol(self, comment, comment_type): + # type: (Any, Any) -> None + if not hasattr(self, '_comment'): + self._comment = [None, None, None] + else: + assert len(self._comment) == 3 + assert self._comment[1] is None + if self.comment[1] is None: + self._comment[1] = [] # type: ignore + self._comment[1].extend([None] * (comment_type + 1 - len(self.comment[1]))) # type: ignore # NOQA + # nprintf('commy', self.comment, comment_type) + self._comment[1][comment_type] = comment # type: ignore + + def add_comment_post(self, comment): + # type: (Any) -> None + if not hasattr(self, '_comment'): + self._comment = [None, None, []] # type: ignore + else: + assert len(self._comment) == 3 + if self._comment[2] is None: + self._comment[2] = [] # type: ignore + self._comment[2].append(comment) # type: ignore + + # def get_comment(self): + # # type: () -> Any + # return getattr(self, '_comment', None) + + @property + def comment(self): + # type: () -> Any + return getattr(self, '_comment', None) + + def move_old_comment(self, target, empty=False): + # type: (Any, bool) -> Any + """move a comment from this token to target (normally next token) + used to combine e.g. 
comments before a BlockEntryToken to the + ScalarToken that follows it + empty is a special for empty values -> comment after key + """ + c = self.comment + if c is None: + return + # don't push beyond last element + if isinstance(target, (StreamEndToken, DocumentStartToken)): + return + delattr(self, '_comment') + tc = target.comment + if not tc: # target comment, just insert + # special for empty value in key: value issue 25 + if empty: + c = [c[0], c[1], None, None, c[0]] + target._comment = c + # nprint('mco2:', self, target, target.comment, empty) + return self + if c[0] and tc[0] or c[1] and tc[1]: + raise NotImplementedError(_F('overlap in comment {c!r} {tc!r}', c=c, tc=tc)) + if c[0]: + tc[0] = c[0] + if c[1]: + tc[1] = c[1] + return self + + def split_old_comment(self): + # type: () -> Any + """split the post part of a comment, and return it + as comment to be added. Delete second part if [None, None] + abc: # this goes to sequence + # this goes to first element + - first element + """ + comment = self.comment + if comment is None or comment[0] is None: + return None # nothing to do + ret_val = [comment[0], None] + if comment[1] is None: + delattr(self, '_comment') + return ret_val + + def move_new_comment(self, target, empty=False): + # type: (Any, bool) -> Any + """move a comment from this token to target (normally next token) + used to combine e.g. 
comments before a BlockEntryToken to the + ScalarToken that follows it + empty is a special for empty values -> comment after key + """ + c = self.comment + if c is None: + return + # don't push beyond last element + if isinstance(target, (StreamEndToken, DocumentStartToken)): + return + delattr(self, '_comment') + tc = target.comment + if not tc: # target comment, just insert + # special for empty value in key: value issue 25 + if empty: + c = [c[0], c[1], c[2]] + target._comment = c + # nprint('mco2:', self, target, target.comment, empty) + return self + # if self and target have both pre, eol or post comments, something seems wrong + for idx in range(3): + if c[idx] is not None and tc[idx] is not None: + raise NotImplementedError( + _F('overlap in comment {c!r} {tc!r}', c=c, tc=tc) + ) + # move the comment parts + for idx in range(3): + if c[idx]: + tc[idx] = c[idx] + return self + + +# class BOMToken(Token): +# id = '' + + +class DirectiveToken(Token): + __slots__ = 'name', 'value' + id = '' + + def __init__(self, name, value, start_mark, end_mark): + # type: (Any, Any, Any, Any) -> None + Token.__init__(self, start_mark, end_mark) + self.name = name + self.value = value + + +class DocumentStartToken(Token): + __slots__ = () + id = '' + + +class DocumentEndToken(Token): + __slots__ = () + id = '' + + +class StreamStartToken(Token): + __slots__ = ('encoding',) + id = '' + + def __init__( + self, + start_mark: StreamMark, + end_mark: StreamMark, + encoding: Any = None, + ) -> None: + Token.__init__(self, start_mark, end_mark) + self.encoding = encoding + + +class StreamEndToken(Token): + __slots__ = () + id = '' + + +class BlockSequenceStartToken(Token): + __slots__ = () + id = '' + + +class BlockMappingStartToken(Token): + __slots__ = () + id = '' + + +class BlockEndToken(Token): + __slots__ = () + id = '' + + +class FlowSequenceStartToken(Token): + __slots__ = () + id = '[' + + +class FlowMappingStartToken(Token): + __slots__ = () + id = '{' + + +class 
FlowSequenceEndToken(Token): + __slots__ = () + id = ']' + + +class FlowMappingEndToken(Token): + __slots__ = () + id = '}' + + +class KeyToken(Token): + __slots__ = () + id = '?' + + # def x__repr__(self): + # return 'KeyToken({})'.format( + # self.start_mark.buffer[self.start_mark.index:].split(None, 1)[0]) + + +class ValueToken(Token): + __slots__ = () + id = ':' + + +class BlockEntryToken(Token): + __slots__ = () + id = '-' + + +class FlowEntryToken(Token): + __slots__ = () + id = ',' + + +class AliasToken(Token): + __slots__ = ('value',) + id = '' + + def __init__(self, value, start_mark, end_mark): + # type: (Any, Any, Any) -> None + Token.__init__(self, start_mark, end_mark) + self.value = value + + +class AnchorToken(Token): + __slots__ = ('value',) + id = '' + + def __init__(self, value, start_mark, end_mark): + # type: (Any, Any, Any) -> None + Token.__init__(self, start_mark, end_mark) + self.value = value + + +class TagToken(Token): + __slots__ = ('value',) + id = '' + + def __init__(self, value, start_mark, end_mark): + # type: (Any, Any, Any) -> None + Token.__init__(self, start_mark, end_mark) + self.value = value + + +class ScalarToken(Token): + __slots__ = 'value', 'plain', 'style' + id = '' + + def __init__(self, value, plain, start_mark, end_mark, style=None): + # type: (Any, Any, Any, Any, Any) -> None + Token.__init__(self, start_mark, end_mark) + self.value = value + self.plain = plain + self.style = style + + +class CommentToken(Token): + __slots__ = '_value', 'pre_done' + id = '' + + def __init__(self, value, start_mark=None, end_mark=None, column=None): + # type: (Any, Any, Any, Any) -> None + if start_mark is None: + assert column is not None + self._column = column + Token.__init__(self, start_mark, None) # type: ignore + self._value = value + + @property + def value(self): + # type: () -> str + if isinstance(self._value, str): + return self._value + return "".join(self._value) + + @value.setter + def value(self, val): + # type: (Any) -> 
class LazyEval:
    """
    Deferred-evaluation wrapper around ``func(*args, **kwargs)``.

    The wrapped call only runs when an attribute of its result is first
    touched; from then on the cached result answers every attribute access
    (special method-wrapper lookups like ``__hash__`` are the exception).
    The sole own attribute, ``lazy_self``, is a closure that holds — and
    after the first call caches — the computed value.
    """

    def __init__(self, func, *args, **kwargs):
        # type: (Any, Any, Any) -> None
        def lazy_self():
            # type: () -> Any
            result = func(*args, **kwargs)
            # swap ourselves out for a trivial accessor so func runs once
            object.__setattr__(self, 'lazy_self', lambda: result)
            return result

        object.__setattr__(self, 'lazy_self', lazy_self)

    def __getattribute__(self, name):
        # type: (Any) -> Any
        accessor = object.__getattribute__(self, 'lazy_self')
        if name == 'lazy_self':
            return accessor
        # everything else is delegated to the (possibly just computed) value
        return getattr(accessor(), name)

    def __setattr__(self, name, value):
        # type: (Any, Any) -> None
        setattr(self.lazy_self(), name, value)
# the above is not good enough though, should provide tzinfo. In Python3 that is easily + # doable drop that kind of support for Python2 as it has not native tzinfo + data = datetime.datetime(year, month, day, hour, minute, second, fraction) + if delta: + data -= delta + return data + + +# originally as comment +# https://github.com/pre-commit/pre-commit/pull/211#issuecomment-186466605 +# if you use this in your code, I suggest adding a test in your test suite +# that check this routines output against a known piece of your YAML +# before upgrades to this code break your round-tripped YAML +def load_yaml_guess_indent(stream): + # type: (StreamTextType) -> Any + """guess the indent and block sequence indent of yaml stream/string + + returns round_trip_loaded stream, indent level, block sequence indent + - block sequence indent is the number of spaces before a dash relative to previous indent + - if there are no block sequences, indent is taken from nested mappings, block sequence + indent is unset (None) in that case + """ + from .main import YAML + + # load a YAML document, guess the indentation, if you use TABs you are on your own + def leading_spaces(line): + # type: (Any) -> int + idx = 0 + while idx < len(line) and line[idx] == ' ': + idx += 1 + return idx + + if isinstance(stream, str): + yaml_str = stream # type: Any + elif isinstance(stream, bytes): + # most likely, but the Reader checks BOM for this + yaml_str = stream.decode('utf-8') + else: + yaml_str = stream.read() + map_indent = None + indent = None # default if not found for some reason + block_seq_indent = None + prev_line_key_only = None + key_indent = 0 + for line in yaml_str.splitlines(): + rline = line.rstrip() + lline = rline.lstrip() + if lline.startswith('- '): + l_s = leading_spaces(line) + block_seq_indent = l_s - key_indent + idx = l_s + 1 + while line[idx] == ' ': # this will end as we rstripped + idx += 1 + if line[idx] == '#': # comment after - + continue + indent = idx - key_indent + 
break + if map_indent is None and prev_line_key_only is not None and rline: + idx = 0 + while line[idx] in ' -': + idx += 1 + if idx > prev_line_key_only: + map_indent = idx - prev_line_key_only + if rline.endswith(':'): + key_indent = leading_spaces(line) + idx = 0 + while line[idx] == ' ': # this will end on ':' + idx += 1 + prev_line_key_only = idx + continue + prev_line_key_only = None + if indent is None and map_indent is not None: + indent = map_indent + yaml = YAML() + return yaml.load(yaml_str), indent, block_seq_indent # type: ignore + + +def configobj_walker(cfg): + # type: (Any) -> Any + """ + walks over a ConfigObj (INI file with comments) generating + corresponding YAML output (including comments + """ + from configobj import ConfigObj # type: ignore + + assert isinstance(cfg, ConfigObj) + for c in cfg.initial_comment: + if c.strip(): + yield c + for s in _walk_section(cfg): + if s.strip(): + yield s + for c in cfg.final_comment: + if c.strip(): + yield c + + +def _walk_section(s, level=0): + # type: (Any, int) -> Any + from configobj import Section + + assert isinstance(s, Section) + indent = ' ' * level + for name in s.scalars: + for c in s.comments[name]: + yield indent + c.strip() + x = s[name] + if '\n' in x: + i = indent + ' ' + x = '|\n' + i + x.strip().replace('\n', '\n' + i) + elif ':' in x: + x = "'" + x.replace("'", "''") + "'" + line = '{0}{1}: {2}'.format(indent, name, x) + c = s.inline_comments[name] + if c: + line += ' ' + c + yield line + for name in s.sections: + for c in s.comments[name]: + yield indent + c.strip() + line = '{0}{1}:'.format(indent, name) + c = s.inline_comments[name] + if c: + line += ' ' + c + yield line + for val in _walk_section(s[name], level=level + 1): + yield val + + +# def config_obj_2_rt_yaml(cfg): +# from .comments import CommentedMap, CommentedSeq +# from configobj import ConfigObj +# assert isinstance(cfg, ConfigObj) +# #for c in cfg.initial_comment: +# # if c.strip(): +# # pass +# cm = CommentedMap() +# 
for name in s.sections: +# cm[name] = d = CommentedMap() +# +# +# #for c in cfg.final_comment: +# # if c.strip(): +# # yield c +# return cm diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..0472fb0 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,27 @@ +[build-system] +requires = [ + "pip >= 19.3.1", + "setuptools >= 42", + "setuptools_scm[toml] >= 3.5.0", + "setuptools_scm_git_archive >= 1.1", + "wheel >= 0.33.6", +] +build-backend = "setuptools.build_meta" + +[tool.black] +skip-string-normalization = true + +[tool.isort] +profile = "black" +# known_first_party = "foo" + +[tool.pytest.ini_options] +# ensure we treat warnings as error +filterwarnings = [ + # "error", + "error::DeprecationWarning", + "error::PendingDeprecationWarning" +] + +[tool.setuptools_scm] +local_scheme = "no-local-version" diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..9dc0015 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,76 @@ +[metadata] +name = ruyaml +url = https://github.com/pycontribs/ruyaml +project_urls = + Bug Tracker = https://github.com/pycontribs/ruyaml/issues + Release Management = https://github.com/pycontribs/ruyaml/releases + Source Code = https://github.com/pycontribs/ruyaml +description = ruyaml is a fork of ruamel.yaml +long_description = file: README.rst +long_description_content_type = text/x-rst; charset=UTF-8 + +history = file: CHANGES +author = ruyaml Contributors +author_email = pycontribs@googlegroups.com +maintainer = Sorin Sbarnea +maintainer_email = sorin.sbarnea@gmail.com +license = MIT license +license_file = LICENSE +classifiers = + Development Status :: 5 - Production/Stable + + Environment :: Console + + Intended Audience :: Developers + Intended Audience :: Information Technology + Intended Audience :: System Administrators + + License :: OSI Approved :: MIT License + + Natural Language :: English + + Operating System :: OS Independent + + Programming Language :: Python :: 3.6 + Programming Language :: Python :: 
3.7 + Programming Language :: Python :: 3.8 + Programming Language :: Python :: 3.9 + Programming Language :: Python :: 3.10 + + Topic :: Utilities +keywords = + selinux + virtualenv + +[files] +packages = + ruyaml + +[options] +use_scm_version = True +python_requires = >=3.6 +package_dir = + = lib +packages = find: +include_package_data = True +zip_safe = True +install_requires = + distro>=1.3.0 + setuptools>=39.0 + +[options.extras_require] +docs = + Sphinx + +[options.package_data] +ruyaml = + py.typed + +[options.packages.find] +where = lib + +[flake8] +show-source = True +max-line-length = 95 +ignore = W503,F405,E203,E402 +exclude = _test/lib,.eggs,.hg,.git,.tox,dist,.cache,__pycache__,ruyaml.egg-info diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..0c0bec0 --- /dev/null +++ b/tox.ini @@ -0,0 +1,67 @@ +[tox] +minversion = 3.16.1 +envlist = + linters + docs + packaging + py{36,37,38,39,310} +isolated_build = true +requires = + setuptools >= 41.4.0 + pip >= 19.3.0 +skip_missing_interpreters = False + +[testenv] +description = Unittest using {basepython} +commands = + /bin/bash -c 'pytest _test/test_*.py' +deps = + pytest +allowlist_externals = + make + sh + +[testenv:docs] +description = Build docs +basepython = python3.8 +deps = + --editable .[docs] +commands = + make singlehtml +changedir = {toxinidir}/_doc + +[testenv:linters] +description = Linting +basepython = python3.8 +deps = + pre-commit>=2.8.2 + flake8 + flake8-bugbear +commands = + pre-commit run -a + +[testenv:packaging] +description = + Do packaging/distribution +# `usedevelop = true` overrides `skip_install` instruction, it's unwanted +usedevelop = false +# don't install package itself in this env +skip_install = true +deps = + build >= 0.7.0 + twine >= 3.7.0 +setenv = +commands = + # build wheel and sdist using PEP-517 + {envpython} -c 'import os.path, shutil, sys; \ + dist_dir = os.path.join("{toxinidir}", "dist"); \ + os.path.isdir(dist_dir) or sys.exit(0); \ + print("Removing 
\{!s\} contents...".format(dist_dir), file=sys.stderr); \ + shutil.rmtree(dist_dir)' + {envpython} -m build \ + --outdir {toxinidir}/dist/ \ + {toxinidir} + # Validate metadata using twine + twine check --strict {toxinidir}/dist/* + # Install the wheel + sh -c "python3 -m pip install --force-reinstall {toxinidir}/dist/*.whl" -- cgit v1.2.3